diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1463,6 +1463,7 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoBinaryNoMask; + let ForceTailAgnostic = true in def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask; } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v12 +; 
CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; 
CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1163,7 +1163,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1220,7 +1220,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu 
-; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfeq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfeq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfeq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, 
v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1148,7 +1148,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1197,7 +1197,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, 
e16,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f32( @@ -447,12 +447,12 @@ 
define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu 
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1163,7 +1163,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1220,7 +1220,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; 
CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmfle.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1148,7 +1148,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1197,7 +1197,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, 
%2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v 
v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, 
e64,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1163,7 +1163,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m2,tu,mu +; 
CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1220,7 +1220,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; 
CHECK-NEXT: vmflt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmflt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1148,7 +1148,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1197,7 +1197,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: 
vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; 
CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1163,7 +1163,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1220,7 +1220,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; 
CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call 
@llvm.riscv.vmfle.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfle.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfle.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfle.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, 
a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1148,7 +1148,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1197,7 +1197,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, 
v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, 
v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1163,7 +1163,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1220,7 +1220,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll @@ -31,12 +31,12 @@ define 
@intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; 
CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmflt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmflt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmflt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1148,7 +1148,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; 
CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1197,7 +1197,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu 
-; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv8f32( @@ -499,12 +499,12 @@ define @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ 
-1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1163,7 +1163,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1220,7 +1220,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a2, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f16( @@ -83,12 +83,12 @@ define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f16( @@ -135,12 +135,12 @@ define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f16( @@ -187,12 +187,12 @@ define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; 
CHECK-NEXT: vmfne.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv8f16( @@ -239,12 +239,12 @@ define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv16f16( @@ -291,12 +291,12 @@ define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f32( @@ -343,12 +343,12 @@ define @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f32( @@ -395,12 +395,12 @@ define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f32( @@ -447,12 +447,12 @@ define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv8f32( @@ -499,12 +499,12 @@ define 
@intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f64( @@ -551,12 +551,12 @@ define @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f64( @@ -603,12 +603,12 @@ define @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmfne.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmfne.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmfne.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f64( @@ -658,7 +658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -756,7 +756,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -805,7 +805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -854,7 +854,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -903,7 +903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, 
e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1001,7 +1001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1050,7 +1050,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1148,7 +1148,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1197,7 +1197,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv16i8( @@ -291,12 +291,12 @@ define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call 
@llvm.riscv.vmseq.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, 
v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; 
CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; 
CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1685,7 +1685,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1745,7 +1744,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1805,7 +1803,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vv v25, v8, v28, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1841,7 +1838,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1876,7 +1873,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1911,7 +1908,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1946,7 +1943,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1981,7 +1978,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2016,7 +2013,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2051,7 +2048,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: 
vmv1r.v v0, v25 @@ -2086,7 +2083,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2121,7 +2118,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2156,7 +2153,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2191,7 +2188,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2226,7 +2223,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2261,7 +2258,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2296,7 +2293,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2331,7 +2328,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2366,7 +2363,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2401,7 +2398,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2436,7 +2433,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: 
vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv16i8( @@ -291,12 +291,12 @@ define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; 
CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call 
@llvm.riscv.vmseq.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmseq.vv v25, 
v10, v12, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmseq.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1673,7 +1673,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1720,7 +1720,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1767,7 +1767,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1802,7 +1802,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1837,7 +1837,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1872,7 +1872,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1907,7 +1907,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1942,7 +1942,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1977,7 +1977,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2012,7 +2012,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2047,7 +2047,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2082,7 +2082,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2117,7 +2117,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2152,7 +2152,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2187,7 +2187,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2222,7 +2222,7 @@ ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; 
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -31,12 +31,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i8(
@@ -83,12 +83,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i8(
@@ -135,12 +135,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i8(
@@ -187,12 +187,12 @@
define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv8i8(
@@ -239,12 +239,12 @@
define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i16(
@@ -447,12 +447,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i16(
@@ -499,12 +499,12 @@
define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv8i16(
@@ -551,12 +551,12 @@
define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv16i16(
@@ -603,12 +603,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i32(
@@ -655,12 +655,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i32(
@@ -707,12 +707,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i32(
@@ -759,12 +759,12 @@
define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv8i32(
@@ -811,12 +811,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i64(
@@ -863,12 +863,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i64(
@@ -915,12 +915,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i64(
@@ -969,10 +969,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1018,10 +1017,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1067,10 +1065,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1116,10 +1113,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1165,10 +1161,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1214,10 +1209,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1263,10 +1257,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1312,10 +1305,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1361,10 +1353,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1410,10 +1401,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1459,10 +1449,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1508,10 +1497,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1557,10 +1545,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1606,10 +1593,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1655,10 +1641,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1715,7 +1700,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v26, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1775,7 +1759,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v26, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1835,7 +1818,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v28, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsle.vv v25, v28, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1871,7 +1853,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1906,7 +1888,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1941,7 +1923,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1976,7 +1958,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2011,7 +1993,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2046,7 +2028,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2081,7 +2063,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2116,7 +2098,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -1, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2151,7 +2133,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2186,7 +2168,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2221,7 +2203,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2256,7 +2238,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2291,7 +2273,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2326,7 +2308,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2361,7 +2343,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2396,7 +2378,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2431,7 +2413,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2466,7 +2448,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2762,7 +2744,6 @@
; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; CHECK-NEXT: vmsle.vv v0, v25, v8, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -2787,7 +2768,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v26, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: addi sp, sp, 16
@@ -2813,7 +2793,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v28, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
; CHECK-NEXT: vmsle.vv v25, v28, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -31,12 +31,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i8(
@@ -83,12 +83,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i8(
@@ -135,12 +135,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i8(
@@ -187,12 +187,12 @@
define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv8i8(
@@ -239,12 +239,12 @@
define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i16(
@@ -447,12 +447,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i16(
@@ -499,12 +499,12 @@
define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv8i16(
@@ -551,12 +551,12 @@
define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv16i16(
@@ -603,12 +603,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i32(
@@ -655,12 +655,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i32(
@@ -707,12 +707,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i32(
@@ -759,12 +759,12 @@
define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv8i32(
@@ -811,12 +811,12 @@
define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsle.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv1i64(
@@ -863,12 +863,12 @@
define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv2i64(
@@ -915,12 +915,12 @@
define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsge.nxv4i64(
@@ -969,10 +969,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1018,10 +1017,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1067,10 +1065,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1116,10 +1113,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1165,10 +1161,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1214,10 +1209,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1263,10 +1257,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1312,10 +1305,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1361,10 +1353,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1410,10 +1401,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1459,10 +1449,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1508,10 +1497,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1557,10 +1545,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1606,10 +1593,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1655,10 +1641,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1704,10 +1689,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1753,10 +1737,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1802,10 +1785,9 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1838,7 +1820,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1873,7 +1855,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1908,7 +1890,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1943,7 +1925,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1978,7 +1960,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2013,7 +1995,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2048,7 +2030,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2083,7 +2065,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, -1, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2118,7 +2100,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2153,7 +2135,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2188,7 +2170,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2223,7 +2205,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2258,7 +2240,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2293,7 +2275,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2328,7 +2310,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2363,7 +2345,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgt.vi v25, v8, 14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2398,7 +2380,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgt.vi v25, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2433,7 +2415,7 @@
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgt.vi v25, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -31,12 +31,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv1i8(
@@ -83,12 +83,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv2i8(
@@ -135,12 +135,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv4i8(
@@ -187,12 +187,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv8i8(
@@ -239,12 +239,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv2i16(
@@ -447,12 +447,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv4i16(
@@ -499,12 +499,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv8i16(
@@ -551,12 +551,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv16i16(
@@ -603,12 +603,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv1i32(
@@ -655,12 +655,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv2i32(
@@ -707,12 +707,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
call @llvm.riscv.vmsgeu.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmsleu.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv4i64( @@ -969,10 +969,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1018,10 +1017,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1067,10 +1065,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, 
e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1116,10 +1113,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1165,10 +1161,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1214,10 +1209,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1263,10 +1257,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1312,10 +1305,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1361,10 +1353,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1410,10 +1401,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1459,10 +1449,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1508,10 +1497,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1557,10 +1545,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1606,10 +1593,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1655,10 +1641,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1715,7 +1700,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1775,7 +1759,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1835,7 +1818,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vv v25, v28, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1871,7 +1853,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1906,7 +1888,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1941,7 +1923,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1976,7 +1958,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; 
CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v25, v8, -9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2011,7 +1993,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v25, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2046,7 +2028,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v25, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2081,7 +2063,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v25, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2116,7 +2098,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmseq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2151,7 +2133,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v25, v8, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2186,7 +2168,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v25, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2221,7 +2203,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v25, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2256,7 +2238,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v25, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2291,7 +2273,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2326,7 +2308,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v25, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2361,7 +2343,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v25, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2396,7 +2378,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v25, v8, 14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2431,7 +2413,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v25, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2466,7 +2448,7 @@
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v25, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2762,7 +2744,6 @@
; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v25, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; CHECK-NEXT: vmsleu.vv v0, v25, v8, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -2787,7 +2768,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v26, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: addi sp, sp, 16
@@ -2813,7 +2793,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v28, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
; CHECK-NEXT: vmsleu.vv v25, v28, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -31,12 +31,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv1i8(
@@ -83,12 +83,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv2i8(
@@ -135,12 +135,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv4i8(
@@ -187,12 +187,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv8i8(
@@ -239,12 +239,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsgeu.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsleu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmsleu.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmsleu.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmsleu.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v10, v8 
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmsleu.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv4i64( @@ -969,10 +969,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1018,10 +1017,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: 
vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1067,10 +1065,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1116,10 +1113,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1165,10 +1161,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1214,10 +1209,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1263,10 +1257,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1312,10 +1305,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1361,10 +1353,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1410,10 +1401,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1459,10 +1449,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; 
CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1508,10 +1497,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1557,10 +1545,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1606,10 +1593,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1655,10 +1641,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1704,10 +1689,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v9 ; CHECK-NEXT: ret entry: @@ -1753,10 +1737,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v10 ; CHECK-NEXT: ret entry: @@ -1802,10 +1785,9 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v25, v12 ; CHECK-NEXT: ret entry: @@ -1838,7 +1820,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1873,7 +1855,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, 
e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1908,7 +1890,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1943,7 +1925,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1978,7 +1960,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsgtu.vi v25, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2013,7 +1995,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsgtu.vi v25, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2048,7 +2030,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2083,7 +2065,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vv v25, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2118,7 +2100,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2153,7 +2135,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsgtu.vi v25, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2188,7 +2170,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsgtu.vi v25, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2223,7 +2205,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2258,7 +2240,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: 
vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2293,7 +2275,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsgtu.vi v25, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2328,7 +2310,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsgtu.vi v25, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2363,7 +2345,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v25, v8, 14, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2398,7 +2380,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsgtu.vi v25, v8, -16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2433,7 +2415,7 @@ ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsgtu.vi v25, v8, -14, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: 
vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv16i8( @@ -291,12 +291,12 @@ define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: 
vmslt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmslt.vv v25, v9, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v10, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v12, v8 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: 
vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: 
vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v28, (a0), zero
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmslt.vv v25, v28, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
@@ -31,12 +31,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv1i8(
@@ -83,12 +83,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv2i8(
@@ -135,12 +135,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv4i8(
@@ -187,12 +187,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv8i8(
@@ -239,12 +239,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv16i8(
@@ -291,12 +291,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv32i8(
@@ -343,12 +343,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv1i16(
@@ -395,12 +395,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv2i16(
@@ -447,12 +447,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv4i16(
@@ -499,12 +499,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv8i16(
@@ -551,12 +551,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv16i16(
@@ -603,12 +603,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv1i32(
@@ -655,12 +655,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv2i32(
@@ -707,12 +707,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv4i32(
@@ -759,12 +759,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv8i32(
@@ -811,12 +811,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmslt.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv1i64(
@@ -863,12 +863,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv2i64(
@@ -915,12 +915,12 @@
 define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgt.nxv4i64(
@@ -968,7 +968,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -31,12 +31,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv1i8(
@@ -83,12 +83,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv2i8(
@@ -135,12 +135,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv4i8(
@@ -187,12 +187,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv8i8(
@@ -239,12 +239,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv16i8(
@@ -291,12 +291,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv32i8(
@@ -343,12 +343,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv1i16(
@@ -395,12 +395,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv2i16(
@@ -447,12 +447,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv4i16(
@@ -499,12 +499,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv8i16(
@@ -551,12 +551,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv16i16(
@@ -603,12 +603,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv1i32(
@@ -655,12 +655,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv2i32(
@@ -707,12 +707,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv4i32(
@@ -759,12 +759,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv8i32(
@@ -811,12 +811,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv1i64(
@@ -863,12 +863,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv2i64(
@@ -915,12 +915,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v12, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv4i64(
@@ -968,7 +968,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v28, (a0), zero
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsltu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v9
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
@@ -31,12 +31,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv1i8(
@@ -83,12 +83,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv2i8(
@@ -135,12 +135,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv4i8(
@@ -187,12 +187,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v9, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv8i8(
@@ -239,12 +239,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v10, v8
+; CHECK-NEXT: vmv1r.v v26, v0
 ; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: ret
 entry:
 %mask = call @llvm.riscv.vmsgtu.nxv16i8(
@@ -291,12 +291,12 @@
 define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
@@ -343,12 +343,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
@@ -395,12 +395,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
@@ -447,12 +447,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
@@ -499,12 +499,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
@@ -551,12 +551,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
@@ -603,12 +603,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
@@ -655,12 +655,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
@@ -707,12 +707,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
@@ -759,12 +759,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
@@ -811,12 +811,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
@@ -863,12 +863,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
@@ -915,12 +915,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
@@ -968,7 +968,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -31,12 +31,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
@@ -83,12 +83,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
@@ -135,12 +135,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
@@ -187,12 +187,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
@@ -239,12 +239,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -291,12 +291,12 @@
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -343,12 +343,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
@@ -395,12 +395,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
@@ -447,12 +447,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
@@ -499,12 +499,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -551,12 +551,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -603,12 +603,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
@@ -655,12 +655,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
@@ -707,12 +707,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -759,12 +759,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -811,12 +811,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
@@ -863,12 +863,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -915,12 +915,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
@@ -968,7 +968,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
@@ -31,12 +31,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
@@ -83,12 +83,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
@@ -135,12 +135,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
@@ -187,12 +187,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
@@ -239,12 +239,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -291,12 +291,12 @@
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -343,12 +343,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
@@ -395,12 +395,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
@@ -447,12 +447,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
@@ -499,12 +499,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -551,12 +551,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -603,12 +603,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
@@ -655,12 +655,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
@@ -707,12 +707,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -759,12 +759,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -811,12 +811,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
@@ -863,12 +863,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -915,12 +915,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
@@ -968,7 +968,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -31,12 +31,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
@@ -83,12 +83,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
@@ -135,12 +135,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
@@ -187,12 +187,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
@@ -239,12 +239,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
@@ -291,12 +291,12 @@
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
@@ -343,12 +343,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
@@ -395,12 +395,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
@@ -447,12 +447,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
@@ -499,12 +499,12 @@
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
@@ -551,12 +551,12 @@
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
@@ -603,12 +603,12 @@
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
@@ -655,12 +655,12 @@
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
@@ -707,12 +707,12 @@
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; 
CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ 
-1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1685,7 +1685,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1745,7 +1744,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1805,7 +1803,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vv v25, v8, v28, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1841,7 +1838,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1876,7 +1873,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1911,7 +1908,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1946,7 +1943,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1981,7 +1978,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2016,7 +2013,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2051,7 +2048,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, 
a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2086,7 +2083,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2121,7 +2118,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2156,7 +2153,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2191,7 +2188,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2226,7 +2223,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2261,7 +2258,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2296,7 +2293,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2331,7 +2328,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2366,7 +2363,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2401,7 +2398,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2436,7 +2433,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; 
CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv16i8( @@ -291,12 +291,12 
@@ define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, 
v16, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmsleu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsleu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsleu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; 
CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1673,7 +1673,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1720,7 +1720,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1767,7 +1767,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1802,7 +1802,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1837,7 +1837,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1872,7 +1872,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1907,7 +1907,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1942,7 +1942,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1977,7 +1977,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2012,7 +2012,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2047,7 +2047,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2082,7 +2082,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2117,7 +2117,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2152,7 +2152,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2187,7 +2187,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2222,7 +2222,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2257,7 +2257,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2292,7 +2292,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2327,7 +2327,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2362,7 +2362,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2397,7 +2397,7 @@ ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; 
CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv16i8( @@ -291,12 +291,12 @@ define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; 
CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, 
a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, 
a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1685,7 +1685,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1745,7 +1744,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1805,7 +1803,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vv v25, v8, v28, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1841,7 +1838,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1876,7 +1873,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1911,7 +1908,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1946,7 +1943,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1981,7 +1978,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2016,7 +2013,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2051,7 +2048,7 @@ ; CHECK-LABEL: 
intrinsic_vmslt_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2086,7 +2083,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -1, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2121,7 +2118,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2156,7 +2153,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2191,7 +2188,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2226,7 +2223,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2261,7 +2258,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2296,7 +2293,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2331,7 +2328,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2366,7 +2363,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2401,7 +2398,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2436,7 +2433,7 @@ ; 
CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: 
ret entry: %mask = call @llvm.riscv.vmslt.nxv16i8( @@ -291,12 +291,12 @@ define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; 
CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmslt.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmslt.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmslt.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmslt.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmslt.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, 
e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1673,7 +1673,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1720,7 +1720,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1767,7 +1767,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1802,7 +1802,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli 
zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1837,7 +1837,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1872,7 +1872,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1907,7 +1907,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1942,7 +1942,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1977,7 +1977,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2012,7 +2012,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2047,7 +2047,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, -1, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2082,7 +2082,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2117,7 +2117,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2152,7 +2152,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2187,7 +2187,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, 
a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2222,7 +2222,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2257,7 +2257,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2292,7 +2292,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2327,7 +2327,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v25, v8, 14, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2362,7 +2362,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsle.vi v25, v8, -16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2397,7 +2397,7 @@ ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsle.vi v25, v8, -14, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll @@ -31,12 +31,12 @@ define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call 
@llvm.riscv.vmsltu.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv16i8( @@ -291,12 +291,12 @@ define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv32i8( @@ -343,12 +343,12 @@ define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i16( @@ -395,12 +395,12 @@ define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu -; 
CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv2i16( @@ -447,12 +447,12 @@ define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i16( @@ -499,12 +499,12 @@ define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv8i16( @@ -551,12 +551,12 @@ define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv16i16( @@ -603,12 +603,12 @@ define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i32( @@ -655,12 +655,12 @@ define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv2i32( @@ -707,12 +707,12 @@ define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, 
i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i32( @@ -759,12 +759,12 @@ define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv8i32( @@ -811,12 +811,12 @@ define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i64( @@ -863,12 +863,12 @@ define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v10 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv2i64( @@ -915,12 +915,12 @@ define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v12 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i64( @@ -968,7 +968,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1015,7 +1015,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; 
CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1109,7 +1109,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1156,7 +1156,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1203,7 +1203,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1297,7 +1297,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1391,7 +1391,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1438,7 +1438,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1485,7 +1485,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1532,7 +1532,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1579,7 +1579,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1626,7 +1626,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1685,7 +1685,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1745,7 +1744,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsltu.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1805,7 +1803,6 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsltu.vv v25, v8, v28, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1841,7 +1838,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1876,7 +1873,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1911,7 +1908,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1946,7 +1943,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, -9, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -1981,7 +1978,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2016,7 +2013,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: 
vmsleu.vi v25, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2051,7 +2048,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2086,7 +2083,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vv v25, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2121,7 +2118,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2156,7 +2153,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2191,7 +2188,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2226,7 +2223,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2261,7 +2258,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2296,7 +2293,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2331,7 +2328,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2366,7 +2363,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v25, v8, 14, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2401,7 +2398,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: 
vmv1r.v v0, v10 ; CHECK-NEXT: vmsleu.vi v25, v8, -16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -2436,7 +2433,7 @@ ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu +; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmsleu.vi v25, v8, -14, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll @@ -31,12 +31,12 @@ define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i8( @@ -83,12 +83,12 @@ define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv2i8( @@ -135,12 +135,12 @@ define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i8( @@ -187,12 +187,12 @@ define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v26, v0 ; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv8i8( @@ -239,12 +239,12 @@ define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu -; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t +; CHECK-NEXT: vmsltu.vv v25, v8, 
v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv2i16(
@@ -447,12 +447,12 @@
define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv4i16(
@@ -499,12 +499,12 @@
define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv8i16(
@@ -551,12 +551,12 @@
define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv16i16(
@@ -603,12 +603,12 @@
define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv1i32(
@@ -655,12 +655,12 @@
define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv2i32(
@@ -707,12 +707,12 @@
define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv4i32(
@@ -759,12 +759,12 @@
define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv8i32(
@@ -811,12 +811,12 @@
define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv1i64(
@@ -863,12 +863,12 @@
define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv2i64(
@@ -915,12 +915,12 @@
define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsltu.nxv4i64(
@@ -968,7 +968,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, -9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vi v25, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vi v25, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vi v25, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vi v25, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vi v25, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vi v25, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, 14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vi v25, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vi v25, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -31,12 +31,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i8(
@@ -83,12 +83,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i8(
@@ -135,12 +135,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i8(
@@ -187,12 +187,12 @@
define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv8i8(
@@ -239,12 +239,12 @@
define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i16(
@@ -447,12 +447,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i16(
@@ -499,12 +499,12 @@
define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv8i16(
@@ -551,12 +551,12 @@
define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv16i16(
@@ -603,12 +603,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i32(
@@ -655,12 +655,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i32(
@@ -707,12 +707,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i32(
@@ -759,12 +759,12 @@
define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv8i32(
@@ -811,12 +811,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i64(
@@ -863,12 +863,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i64(
@@ -915,12 +915,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i64(
@@ -968,7 +968,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v26, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v26, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v28, (a0), zero
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vv v25, v8, v28, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
@@ -31,12 +31,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i8(
@@ -83,12 +83,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i8(
@@ -135,12 +135,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i8(
@@ -187,12 +187,12 @@
define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv8i8(
@@ -239,12 +239,12 @@
define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv16i8(
@@ -291,12 +291,12 @@
define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv32i8(
@@ -343,12 +343,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i16(
@@ -395,12 +395,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i16(
@@ -447,12 +447,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i16(
@@ -499,12 +499,12 @@
define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv8i16(
@@ -551,12 +551,12 @@
define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv16i16(
@@ -603,12 +603,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i32(
@@ -655,12 +655,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i32(
@@ -707,12 +707,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i32(
@@ -759,12 +759,12 @@
define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv8i32(
@@ -811,12 +811,12 @@
define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v9
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv1i64(
@@ -863,12 +863,12 @@
define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v10
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv2i64(
@@ -915,12 +915,12 @@
define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v25, v8, v12
+; CHECK-NEXT: vmv1r.v v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: ret
entry:
%mask = call @llvm.riscv.vmsne.nxv4i64(
@@ -968,7 +968,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vx v25, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25