diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2027,7 +2027,8 @@
 // 15. Vector Reduction Operations
 // 15.1. Vector Single-Width Integer Reduction Instructions
-let MaskedPolicyScheme = NonePolicy,
+let UnMaskedPolicyScheme = HasPassthruOperand,
+    MaskedPolicyScheme = HasPassthruOperand,
     IsPrototypeDefaultTU = true,
     HasMaskPolicy = false in {
 defm vredsum : RVVIntReductionBuiltinSet;
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -937,6 +937,7 @@
   default:
     break;
   }
+  bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
   if (IsMasked) {
     // If HasMaskedOffOperand, insert result type as first input operand if
    // need.
@@ -954,6 +955,10 @@
         NewPrototype.insert(NewPrototype.begin() + NF + 1, MaskoffType);
       }
     }
+    // Erase passthru operand for TAM
+    if (NF == 1 && IsPrototypeDefaultTU && DefaultPolicy == Policy::TAMA &&
+        HasPassthruOp && !HasMaskedOffOperand)
+      NewPrototype.erase(NewPrototype.begin() + 1);
     if (HasMaskedOffOperand && NF > 1) {
       // Convert
       // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
@@ -967,7 +972,6 @@
       NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
     }
   } else if (NF == 1) {
-    bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
     if (DefaultPolicy == Policy::TU && HasPassthruOp && !IsPrototypeDefaultTU)
       NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
     else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
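The scheme change above gives the unmasked and masked reduction builtins a passthru (merge) operand, and the new RISCVVIntrinsicUtils.cpp hunk erases that operand again for the masked tail-agnostic (TAM) prototype. A minimal usage sketch of the resulting overloaded API, with signatures taken from the tests added below (the wrapper name `reduce_variants` is illustrative only, not part of the patch):

    #include <riscv_vector.h>

    vint32m1_t reduce_variants(vbool64_t mask, vint32m1_t merge,
                               vint32mf2_t vector, vint32m1_t scalar,
                               size_t vl) {
      // Tail-undisturbed: takes an explicit merge (passthru) operand.
      vint32m1_t tu = vredsum_tu(merge, vector, scalar, vl);
      // Tail-agnostic: no merge operand; the passthru is lowered to undef.
      vint32m1_t ta = vredsum_ta(vector, scalar, vl);
      // Masked, tail-undisturbed: mask plus merge operand.
      vint32m1_t tum = vredsum_tum(mask, merge, vector, scalar, vl);
      // Masked, tail-agnostic: the merge operand is erased from the
      // prototype, matching the NewPrototype.erase(...) call above.
      vint32m1_t tam = vredsum_tam(mask, vector, scalar, vl);
      return tam;
    }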
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -194,3 +194,39 @@
                     vfloat64m1_t scalar, size_t vl) {
   return vfredmax(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -194,3 +194,39 @@
                     vfloat64m1_t scalar, size_t vl) {
   return vfredmin(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -392,3 +392,75 @@
                     vfloat64m1_t scalar, size_t vl) {
   return vfredosum(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -224,3 +224,75 @@
                     vfloat64m1_t scalar, size_t vl) {
   return vfwredosum(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
@@ -927,3 +927,75 @@
                     vuint64m1_t scalar, size_t vl) {
   return vredand(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -927,3 +927,75 @@
                     vuint64m1_t scalar, size_t vl) {
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -927,3 +927,75 @@
                     vuint64m1_t scalar, size_t vl) {
   return vredminu(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
@@ -927,3 +927,75 @@
                     size_t vl) {
   return vredor(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
@@ -927,3 +927,75 @@
                     vuint64m1_t scalar, size_t vl) {
   return vredsum(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
@@ -927,3 +927,75 @@
                     vuint64m1_t scalar, size_t vl) {
   return vredxor(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -759,3 +759,75 @@
                     vuint64m1_t scalar, size_t vl) {
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
  return vwredsumu_tam(mask, vector, scalar, vl);
+}
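The remaining files exercise the same four policy suffixes through the non-overloaded, fully type-suffixed intrinsic names. The two spellings map to the same builtins; a small illustrative correspondence (both names are taken from the tests, the variables are assumed to be in scope):

    // Overloaded form (rvv-intrinsics-overloaded tests above):
    vint32m1_t r0 = vredsum_tu(merge, vector, scalar, vl);
    // Explicit form (rvv-intrinsics tests below):
    vint32m1_t r1 = vredsum_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);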
@llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c @@ -303,3 +303,39 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { return vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c @@ -303,3 +303,39 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { return vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t 
vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c @@ -609,3 +609,75 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c @@ -441,3 +441,75 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { return vfwredusum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return 
vfwredosum_vs_f32mf2_f64m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c @@ -927,3 +927,75 @@ vuint64m1_t scalar, size_t vl) { return vredand_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredand_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c @@ -927,3 +927,75 @@ vuint64m1_t scalar, size_t vl) { return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( undef, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
@@ -927,3 +927,75 @@
                                           vuint64m1_t scalar, size_t vl) {
   return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
@@ -927,3 +927,75 @@
                                           vuint64m1_t scalar, size_t vl) {
   return vredminu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
@@ -927,3 +927,75 @@
                                         size_t vl) {
   return vredor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
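For readers decoding the CHECK lines in these integer tests, the RV64 type
lowering (standard RVV lowering, not something introduced by this patch) is:

  // vint32mf2_t / vuint32mf2_t -> <vscale x 1 x i32>  (nxv1i32, source vector)
  // vint32m1_t  / vuint32m1_t  -> <vscale x 2 x i32>  (nxv2i32, merge/scalar/result)
  // vbool64_t                  -> <vscale x 1 x i1>   (nxv1i1, mask)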
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
@@ -927,3 +927,75 @@
                                           vuint64m1_t scalar, size_t vl) {
   return vredsum_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
@@ -927,3 +927,75 @@
                                           vuint64m1_t scalar, size_t vl) {
   return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
@@ -759,3 +759,75 @@
                                             vuint64m1_t scalar, size_t vl) {
   return vwredsumu_vs_u32m8_u64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1_tam(mask, vector, scalar, vl);
+}
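All of the TAM expectations above hinge on one codegen detail: reductions carry
a passthru operand but no maskedoff operand, so the existing masked path never
inserted the leading undef for them. The RISCVVEmitter.cpp hunk below adds
exactly that. The effective rewrite, shown on a case from the tests:

  // C signature (TAMA): vredsum_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl)
  // IR call produced:   @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(
  //                         <vscale x 2 x i32> undef, vector, scalar, mask, vl)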
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -194,6 +194,10 @@
     if (RVVI->hasMaskedOffOperand() &&
         RVVI->getDefaultPolicy() == Policy::TAMA)
       OS << "  Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
+    // Masked reduction cases: passthru operand but no maskedoff operand.
+    if (!RVVI->hasMaskedOffOperand() && RVVI->hasPassthruOperand() &&
+        RVVI->getDefaultPolicy() == Policy::TAMA)
+      OS << "  Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
   } else {
     OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
   }
@@ -201,7 +205,8 @@
   if (RVVI->hasPolicyOperand())
     OS << "  Ops.push_back(ConstantInt::get(Ops.back()->getType(), "
          "DefaultPolicy));\n";
-  else if (RVVI->hasPassthruOperand() && RVVI->getDefaultPolicy() == Policy::TA)
+  else if (RVVI->hasPassthruOperand() &&
+           RVVI->getDefaultPolicy() == Policy::TA)
     OS << "  Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
 }
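As a closing illustration, a hypothetical strip-mined loop built on the new
tail-undisturbed reduction. The helpers used here (vsetvl_e32mf2,
vle32_v_i32mf2, vmv_s_x_i32m1, vmv_x_s_i32m1_i32, vundefined_i32m1) are the
pre-existing RVV intrinsics and are assumed, not part of this patch:

  #include <riscv_vector.h>

  int32_t sum_i32(const int32_t *x, size_t n) {
    // Seed element 0 of the accumulator with 0; the rest is don't-care.
    vint32m1_t acc = vmv_s_x_i32m1(vundefined_i32m1(), 0, 1);
    for (size_t vl; n > 0; n -= vl, x += vl) {
      vl = vsetvl_e32mf2(n);
      vint32mf2_t v = vle32_v_i32mf2(x, vl);
      // _tu keeps acc's tail intact even as vl shrinks on the last iteration.
      acc = vredsum_vs_i32mf2_i32m1_tu(acc, v, acc, vl);
    }
    return vmv_x_s_i32m1_i32(acc);
  }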