diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3275,7 +3275,8 @@
                                     string Constraint = ""> {
   let VLMul = MInfo.value in {
     def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
-    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
+                                   RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
   }
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
@@ -98,9 +98,7 @@
 ; CHECK-LABEL: vpmerge_vslideup:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vslideup.vx v10, v9, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0)
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
@@ -114,9 +112,7 @@
 ; CHECK-LABEL: vpmerge_vslidedown:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vslidedown.vx v10, v9, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0)
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -751,9 +751,8 @@
 define <vscale x 2 x i32> @vpselect_vslideup(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vslideup:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v10, v9, a0
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i64 %x, i64 %1, i64 0)
@@ -765,9 +764,8 @@
 define <vscale x 2 x i32> @vpselect_vslidedown(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vslidedown:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i64 %x, i64 %1, i64 0)
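
Note: tagging the _MASK pseudo with RISCVMaskedPseudo is what lets the vmerge/vselect
peephole in RISCVISelDAGToDAG (performCombineVMergeAndVOps) see through the masked
slide: the class records which pseudo is the masked form, which is its unmasked
counterpart, and where the mask operand sits. A rough sketch of the class as it
appears in RISCVInstrInfoVPseudos.td around the time of this change (check against
the in-tree definition rather than treating this as authoritative):

class RISCVMaskedPseudo<bits<4> MaskIdx> {
  // The _MASK pseudo currently being defined.
  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
  // Its unmasked counterpart, found by stripping the _MASK suffix.
  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
  // Operand index of the v0 mask on the masked pseudo.
  bits<4> MaskOpIdx = MaskIdx;
}

For the slide pseudos the mask is operand 3 (after passthru, source vector, and
offset), hence RISCVMaskedPseudo</*MaskOpIdx*/ 3> above. With the annotation in
place, a vmerge whose true operand is a slide can be folded into a single masked
slide with mask-undisturbed (mu) policy, which is what the updated CHECK lines show.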