diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -7566,3 +7566,50 @@
   %red = call i64 @llvm.vector.reduce.mul.v64i64(<64 x i64> %v)
   ret i64 %red
 }
+
+define i8 @fold_vreduce_xor_v2i8() {
+; CHECK-LABEL: fold_vreduce_xor_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    vmv.s.x v9, zero
+; CHECK-NEXT:    vredxor.vs v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %red = call i8 @llvm.vector.reduce.xor.v2i8(<2 x i8> %vb)
+  ret i8 %red
+}
+
+define i8 @fold_vreduce_or_v2i8() {
+; CHECK-LABEL: fold_vreduce_or_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    vmv.s.x v9, zero
+; CHECK-NEXT:    vredor.vs v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %red = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %vb)
+  ret i8 %red
+}
+
+define i8 @fold_vreduce_and_v2i8() {
+; CHECK-LABEL: fold_vreduce_and_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vredand.vs v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %red = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %vb)
+  ret i8 %red
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
@@ -2259,3 +2259,50 @@
   %red = call i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
+
+define signext i8 @fold_vreduce_xor_nxv2i8(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: fold_vreduce_xor_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    vmv.s.x v9, zero
+; CHECK-NEXT:    vredxor.vs v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %red = call i8 @llvm.vector.reduce.xor.nxv2i8(<vscale x 2 x i8> %splat)
+  ret i8 %red
+}
+
+define signext i8 @fold_vreduce_or_nxv2i8(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: fold_vreduce_or_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    vmv.s.x v9, zero
+; CHECK-NEXT:    vredor.vs v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %red = call i8 @llvm.vector.reduce.or.nxv2i8(<vscale x 2 x i8> %splat)
+  ret i8 %red
+}
+
+define signext i8 @fold_vreduce_and_nxv2i8(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: fold_vreduce_and_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 4
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vredand.vs v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %red = call i8 @llvm.vector.reduce.and.nxv2i8(<vscale x 2 x i8> %splat)
+  ret i8 %red
+}