diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8865,6 +8865,20 @@
     if (VT == MVT::i1)
       Opcode = ISD::VP_REDUCE_XOR;
     break;
+  case ISD::VP_REDUCE_SMAX:
+  case ISD::VP_REDUCE_UMIN:
+    // If this is a VP_REDUCE_SMAX/VP_REDUCE_UMIN mask operation, turn it
+    // into VP_REDUCE_AND.
+    if (VT == MVT::i1)
+      Opcode = ISD::VP_REDUCE_AND;
+    break;
+  case ISD::VP_REDUCE_SMIN:
+  case ISD::VP_REDUCE_UMAX:
+    // If this is a VP_REDUCE_SMIN/VP_REDUCE_UMAX mask operation, turn it
+    // into VP_REDUCE_OR.
+    if (VT == MVT::i1)
+      Opcode = ISD::VP_REDUCE_OR;
+    break;
   }
 
   // Memoize nodes.
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -400,3 +400,493 @@
   %r = call i1 @llvm.vp.reduce.add.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
   ret i1 %r
 }
+
+declare i1 @llvm.vp.reduce.smax.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -538,3 +538,494 @@
   %r = call i1 @llvm.vp.reduce.add.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
   ret i1 %r
 }
+
+
+declare i1 @llvm.vp.reduce.smax.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv1i1(i1 %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv2i1(i1 %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv8i1(i1 %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv16i1(i1 %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv32i1(i1 %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smax.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smax.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv1i1(i1 %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv2i1(i1 %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv8i1(i1 %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv16i1(i1 %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv32i1(i1 %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define signext i1 @vpreduce_smin_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.smin.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv1i1(i1 %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv2i1(i1 %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv8i1(i1 %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv16i1(i1 %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv32i1(i1 %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define signext i1 @vpreduce_umax_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    snez a1, a1
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umax.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv1i1(i1 %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv2i1(i1 %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv8i1(i1 %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv16i1(i1 %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv32i1(i1 %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.umin.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.umin.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
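
Note on the i1 equivalences the new getNode() cases rely on: a 1-bit element holds only 0 and -1 as a signed value (0 and 1 unsigned), so a signed-max or unsigned-min reduction of a mask is false as soon as any active lane is false (an AND reduction), while a signed-min or unsigned-max reduction is true as soon as any active lane is true (an OR reduction). A minimal IR sketch of the intended fold (illustrative only; the function name is hypothetical and this is not part of the patch):

declare i1 @llvm.vp.reduce.smax.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)

define signext i1 @smax_mask_folds_to_and(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
  ; With this patch, SelectionDAG::getNode() rewrites the i1 VP_REDUCE_SMAX
  ; node to VP_REDUCE_AND, so this should select the same vmnot/vcpop/seqz
  ; sequence the tests above expect for an @llvm.vp.reduce.and.nxv4i1 call.
  %r = call i1 @llvm.vp.reduce.smax.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
  ret i1 %r
}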