diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -299,6 +299,11 @@
                             list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
 
+// IntrinsicTypes is output, op1 [-1, 0]
+multiclass RVVOutOp0BuiltinSet<string intrinsic_name, string type_range,
+                               list<list<string>> suffixes_prototypes>
+    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 0]>;
+
 // IntrinsicTypes is output, op1 [-1, 1]
 multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
                                list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
@@ -498,22 +503,22 @@ class RVVConvToNarrowingUnsignedBuiltin<string overloaded_name>
     : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>;
 
-let HasMaskedOffOperand = false in {
+let HasMaskedOffOperand = true in {
 multiclass RVVSignedReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 multiclass RVVUnsignedReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 multiclass RVVFloatingReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 multiclass RVVFloatingWidenReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 }
@@ -2057,7 +2062,7 @@
 // 15.1. Vector Single-Width Integer Reduction Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = HasPassthruOperand,
-    IsPrototypeDefaultTU = true,
+    IsPrototypeDefaultTU = false,
     HasMaskPolicy = false in {
 defm vredsum : RVVIntReductionBuiltinSet;
 defm vredmaxu : RVVUnsignedReductionBuiltin;
@@ -2070,11 +2075,11 @@
 
 // 15.2. Vector Widening Integer Reduction Instructions
 // Vector Widening Integer Reduction Operations
-let HasMaskedOffOperand = false in {
-  defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
-                                      [["vs", "vSw", "SwSwvSw"]]>;
-  defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
-                                       [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+let HasMaskedOffOperand = true in {
+  defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi",
+                                      [["vs", "vSw", "SwvSw"]]>;
+  defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi",
+                                       [["vs", "UvUSw", "USwUvUSw"]]>;
 }
 
 // 15.3.
Vector Single-Width Floating-Point Reduction Instructions diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c @@ -1,207 +1,468 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t 
test_vfredmax_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_tu(merge, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu (vfloat32m1_t 
maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_ta (vfloat16m1_t 
vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta( @@ -209,17 +470,269 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { return vfredmax_ta(vector, scalar, vl); } +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_tum(mask, merge, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tam 
(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam( @@ -227,6 +740,78 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { return vfredmax_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c @@ -1,207 +1,468 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); 
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, 
vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_tu(merge, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfredmin_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta( @@ -209,17 +470,269 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { return vfredmin_ta(vector, scalar, vl); } +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + 
return vfredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum 
(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_tum(mask, merge, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredmin_vs_f32mf2_f32m1_tam( @@ -227,6 +740,78 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { return vfredmin_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c @@ -1,466 +1,1628 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(dst, vector, scalar, vl); +vfloat64m1_t 
test_vfredosum_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t 
scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu (vfloat16m1_t 
maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_tu(merge, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_ta(vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_tum(mask, merge, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_tam(mask, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_tu(merge, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t 
test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_ta(vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_tum(mask, merge, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, 
size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_ta( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { 
+ return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_tam(mask, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t 
test_vfredosum_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + 
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return 
vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vfredosum_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { return vfredosum_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c @@ -1,237 +1,954 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1 (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1 (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1 (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1 (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f16m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1 (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1 (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1 (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1 (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1 (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1 (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1 (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m (vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1 (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1 (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1 (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1 (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1 (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1 (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1 (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1 (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1 (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1 (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1 (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m (vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu (vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) 
{ + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu (vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_ta (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_ta (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_ta (vfloat16m1_t 
vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_ta (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_ta (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dst, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_ta (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_ta (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t 
test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_ta (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_ta (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_ta (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f16m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum (vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum (vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu (vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu (vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu (vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_tu(merge, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_ta (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_ta (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_ta (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_ta (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_ta (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_ta (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta( @@ -243,13 +960,193 @@ return vfwredusum_ta(vector, scalar, vl); } +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_ta (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_ta (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_ta (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_ta (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum (vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum (vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_tum(mask, merge, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum (vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam( @@ -261,38 +1158,38 @@ return vfwredusum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_tu(merge, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_ta(vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, 
vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_tum(mask, merge, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_tam(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - 
vint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, 
size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t 
test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return 
vredand(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t 
test_vredand_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - 
vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return 
vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint32m1_t 
test_vredand_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, 
vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return 
vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_tu(merge, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_tu(merge, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_ta(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_ta(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return 
vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: 
@test_vredand_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { +vuint32m1_t test_vredand_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredand_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t 
scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vredand_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, 
vint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tum( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
   return vredand_tam(mask, vector, scalar, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -6,996 +6,2376 @@
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
-                                     vint8m1_t scalar, size_t vl) {
-  return vredmax(dst, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - 
return vredmax(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return 
vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, 
size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); 
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t 
maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t 
test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m (vbool4_t mask, 
vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredmax_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmax_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_ta(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_ta(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu 
(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vredmax_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return 
vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return 
vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); 
+} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); 
+} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return 
vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tam (vbool32_t mask, 
vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { return vredmaxu_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - 
vint16m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t 
scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, 
vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(dst, 
vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); 
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, 
vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return 
vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, 
size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: 
@test_vredmin_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_ta(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_ta(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_tum(mask, merge, vector, scalar, vl); 
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return 
vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredminu_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_ta 
(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t 
vector, vint16m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredmin_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { return vredminu_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return 
vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, 
size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); 
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint16m1_t 
test_vredor_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, 
vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredor_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t 
scalar, size_t vl) { - return vredor_tu(merge, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_tu(merge, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_ta(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_ta(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { +vuint32m1_t test_vredor_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_tum (vbool4_t mask, 
vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredor_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t 
maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t 
scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_tam(mask, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { return vredor_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, 
vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t 
scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); 
+vuint16m1_t test_vredsum_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m 
(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - 
vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, 
vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint32m1_t 
test_vredsum_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16m1_t test_vredsum_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+ return vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tu(merge, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tu(maskedoff, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tu(merge, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tu(maskedoff, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_ta(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tu(maskedoff, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_ta(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tu(maskedoff, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vredsum_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vredsum_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredsum_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t 
scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, 
size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tum( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vredsum_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredsum_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredsum_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { return vredsum_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1 (vint16mf2_t 
vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint16m1_t 
test_vredxor_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t 
test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - 
size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m (vbool16_t 
mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_tu(merge, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_tu(merge, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t 
test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_ta(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_ta(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return 
vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_ta 
(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t 
scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, 
vuint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + 
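+// A minimal usage sketch (illustrative only, not part of the autogenerated
+// checks): the _tam (tail-agnostic, masked) overloads take no destination
+// operand, so the builtin lowers to the masked intrinsic with a poison merge
+// value, as the CHECK lines in this file show. The helper name below is a
+// hypothetical example, not an intrinsic:
+//
+//   vuint32m1_t xor_reduce_masked(vbool16_t m, vuint32m2_t v,
+//                                 vuint32m1_t init, size_t vl) {
+//     // Only elements of v selected by m take part in the reduction; init
+//     // supplies the starting value via the scalar operand.
+//     return vredxor_tam(m, v, init, vl);
+//   }
+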
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { return vredxor_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c @@ -6,828 +6,1944 @@ // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1 (vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1 (vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1 (vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1 (vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1 (vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1 (vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector, - vint16m1_t 
scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1 (vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1 (vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1 (vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1 (vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1 (vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1 
(vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1 (vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1 (vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1 (vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1 (vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1 (vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vwredsum_vs_i32m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1 (vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1 (vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1 (vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1 (vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1 (vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1 (vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1 (vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1 (vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1 (vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1 (vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1 (vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1 (vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1 (vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1 (vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1 (vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1 (vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1 (vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1 (vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1 (vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint8mf8_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint8mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint8mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint8m1_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint8m2_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint8m4_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst, - vint8m8_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m (vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint16mf4_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint16mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint16m1_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint32m1_t 
test_vwredsum_vs_i16m1_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint16m2_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint16m4_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst, - vint16m8_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m (vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint32mf2_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint32m1_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint32m2_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint32m4_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst, - vint32m8_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m (vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint8mf8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint8mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint8mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint8m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint8m2_t vector, - vuint16m1_t scalar, size_t vl) 
{ - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint8m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst, - vuint8m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m (vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint16mf4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint16mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint16m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint16m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint16m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst, - vuint16m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m (vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint32mf2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint32m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint32m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint32m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst, - vuint32m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m (vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu (vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu (vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu (vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu (vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu (vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu (vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu (vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return 
vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu (vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu (vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu (vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu (vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu (vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu (vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu (vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu (vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu (vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu (vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_tu(merge, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu (vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu (vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu (vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu (vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + 
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu (vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu (vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu (vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu (vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu (vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu (vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu (vuint32m1_t maskedoff, vuint16m4_t vector, 
vuint32m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu (vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_tu(merge, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta( +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_ta(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu (vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta( +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_ta(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu (vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum( +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_tum(mask, merge, 
vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu (vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum( +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_tum(mask, merge, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu (vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam( +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_ta (vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_ta (vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_ta (vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_ta (vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_ta (vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_ta (vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_ta (vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_ta (vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_ta (vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_ta (vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_ta (vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_ta (vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_ta (vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_tam(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta (vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); } -// 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam( +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_ta (vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_ta (vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_ta (vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_ta (vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_ta (vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_ta (vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_ta (vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_ta (vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_ta (vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_ta (vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_ta (vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_ta (vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_ta (vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_ta (vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_ta (vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_ta (vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_ta (vuint16m8_t vector, vuint32m1_t scalar, 
size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta (vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_ta (vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_ta (vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_ta (vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_ta (vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return 
vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum (vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tum( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum (vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum (vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum (vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum (vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum (vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tam (vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tam (vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tam (vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return 
vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tam (vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tam (vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tam (vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tam (vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tam (vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tam (vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tam (vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tam (vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, 
size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tam (vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tam (vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam (vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tam (vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tam (vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tam (vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tam (vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tam (vbool64_t mask, 
vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tam (vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tam (vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tam (vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tam (vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tam (vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tam (vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tam (vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tam (vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tam (vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tam (vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tam (vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tam (vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
   return vwredsumu_tam(mask, vector, scalar, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -6,311 +6,463 @@
 #include <riscv_vector.h>
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf4_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf2_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m1_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m2_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m4_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+
return vfredmax_vs_f16m8_f16m1(vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1(dst, vector, 
scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return 
vfredmax_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf4_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf2_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m1_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m2_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu( +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1_ta(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta( @@ -318,17 +470,269 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { return vfredmax_vs_f32mf2_f32m1_ta(vector, scalar, vl); } +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1_ta(vector, scalar, vl); +} + 
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmax_vs_f16mf4_f16m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmax_vs_f16mf2_f16m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmax_vs_f16m1_f16m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmax_vs_f16m2_f16m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmax_vs_f16m4_f16m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmax_vs_f16m8_f16m1_tam(mask, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
@@ -336,6 +740,78 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
 return vfredmax_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m1_f32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m2_f32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m4_f32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m8_f32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m1_f64m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m2_f64m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m4_f64m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m8_f64m1_tam(mask, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -6,311 +6,463 @@
 #include <riscv_vector.h>
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmin_vs_f16mf4_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmin_vs_f16mf2_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredmin_vs_f16m1_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1(vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t 
vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfredmin_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return 
vfredmin_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu( +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_ta(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta( @@ -318,17 +470,269 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { return vfredmin_vs_f32mf2_f32m1_ta(vector, scalar, vl); } +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_tam(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam( @@ -336,6 +740,78 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { return vfredmin_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); } + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_tam(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c @@ -6,635 +6,1543 @@ #include +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t 
test_vfredosum_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfredosum_vs_f16m1_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_m(mask, maskedoff, vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfredusum_vs_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1(vector, scalar, vl); +} + // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_ta(vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1(dst, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1(dst, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1(dst, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: 
@test_vfredusum_vs_f64m8_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1(dst, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return 
vfredusum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV64-LABEL: 
@test_vfredosum_vs_f32m1_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu (vfloat16m1_t 
maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, - vfloat32mf2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, - vfloat32m2_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, - vfloat32m4_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, - vfloat32m8_t vector, - vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat64m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat64m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat64m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t 
vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return 
vfredosum_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, 
vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, 
size_t vl) { - return vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_ta(vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_tam(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam( @@ -642,42 +1550,78 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t 
test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { return vfredusum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_ta(vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_tam(mask, 
vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c @@ -6,510 +6,1478 @@ #include -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1 (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf4_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1 (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf2_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1 (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1 (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1(dst, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1 (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1 (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1 (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1 (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1 (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1 (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1 (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m (vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1 (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf4_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1 (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf2_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1 (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m1_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1 (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m2_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1 (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m4_f32m1(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1 (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst, - vfloat32mf2_t vector, - 
vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1 (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1 (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1 (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1 (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1(dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1 (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat32mf2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m (vbool64_t mask, 
vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, - vfloat32m1_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m (vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, - vfloat32m2_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m (vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, - vfloat32m4_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m (vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, - vfloat32m8_t vector, - vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m (vbool4_t mask, vfloat64m1_t 
maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t 
test_vfredosum_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return 
vfwredusum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_tu(merge, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + 
return vfredosum_vs_f64m1_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_ta(vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_tum(mask, merge, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_tam(mask, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return 
vfredosum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum (vbool16_t mask, 
vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_tu(merge, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta( +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tum( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t 
vl) { + return vfredosum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu (vfloat32m1_t maskedoff, 
vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_ta (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_ta (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_ta (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_ta (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_ta (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_ta (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_ta (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_ta (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_ta (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_ta (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_ta (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_ta (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_ta (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_ta (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum (vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum (vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum (vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum (vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum (vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum (vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum (vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum (vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum (vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum (vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum (vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfredusum_vs_f64m8_f64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum (vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tam (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tam (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tam (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tam (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tam (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tam (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tam (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tam (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tam (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tam (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tam (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_ta(vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tam (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_tum(mask, merge, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tam (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_tam(mask, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam( +// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tam( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_tam(mask, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tam (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t 
scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - 
vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - 
vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t 
scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1(dst, 
vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return 
vredand_vs_u16m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - 
vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t 
dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredand_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredand_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredand_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredand_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredand_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredand_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredand_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredand_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredand_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return 
vredand_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredand_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredand_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredand_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredand_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t 
scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredand_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredand_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredand_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t 
vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t 
test_vredand_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t 
vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16m1_t test_vredand_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_ta(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_ta(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, 
size_t vl) { + return vredand_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu 
(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_tu 
(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredand_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_ta (vuint16m1_t 
vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); 
+} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tum( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_tam(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, 
vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t 
scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1(vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1(vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmaxu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return 
vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t 
test_vredmaxu_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return 
vredmaxu_vs_u64m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmax_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t 
dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredmax_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t 
test_vredmax_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredmax_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_m(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredmax_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredmaxu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, 
size_t vl) { - return vredmaxu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_ta(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_ta(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_tu 
(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredmaxu_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return 
vredmax_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_ta(vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tam( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + 
return vredmax_vs_i32m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredmaxu_vs_u32m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1(vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredminu_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1 
(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredmin_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return 
vredmin_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, 
vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredmin_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredmin_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredmin_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - 
size_t vl) { - return vredminu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredminu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m 
(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredminu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, 
vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_ta(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_ta(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return 
vredminu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_ta 
(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + 
return vredminu_vs_u16mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vredminu_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tam( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredminu_vs_u8mf4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return 
vredor_vs_i8m8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1(dst, vector, 
scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1(dst, vector, scalar, vl); +vint32m1_t 
test_vredor_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1 (vint64m8_t 
vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1(vector, scalar, vl); 
} // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t 
mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return 
vredor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t 
test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t 
test_vredor_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, vuint16m1_t scalar, - size_t vl) { - return vredor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, vuint32m1_t scalar, - size_t vl) { - return vredor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, 
vuint64m1_t scalar, - size_t vl) { - return vredor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, vuint64m1_t scalar, - size_t vl) { - return vredor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_ta(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_ta(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredor_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_tu(maskedoff, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return 
vredor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_ta(vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vredor_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + 
return vredor_vs_u64m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vredor_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t 
vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return 
vredor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tum( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vredor_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return 
vredor_vs_u16m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vredor_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1 
(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + 
return vredsum_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { 
+ return vredsum_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + 
return vredsum_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredsum_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return 
vredsum_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1 (vuint32m4_t vector, 
vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t 
test_vredsum_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredsum_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredsum_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return 
vredsum_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredsum_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t 
scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredsum_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredsum_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - 
vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t 
test_vredsum_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vredsum_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_ta(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_ta(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, 
size_t vl) { + return vredsum_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vredsum_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +vuint32m1_t 
test_vredsum_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredsum_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_ta (vuint16m4_t 
vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_tum (vbool2_t 
mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t 
vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, 
size_t vl) { + return vredsum_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_tum(mask, 
maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_tum(mask, maskedoff, vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1_tum(mask, maskedoff, vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1_tam(mask, vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, 
vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c @@ -6,996 +6,2376 @@ // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, - vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1(dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, - vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, - vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, - vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, - vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1(dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, - vint8mf8_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, - vint8mf4_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, - vint8mf2_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, - vint8m1_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, - vint8m2_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, - vint8m4_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, - vint8m8_t vector, vint8m1_t scalar, - size_t vl) { - return vredxor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1_m (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint16mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, 
vint16m1_t dst, - vint16mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint16m2_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint16m4_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint16m8_t vector, vint16m1_t scalar, - size_t vl) { - return vredxor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t 
test_vredxor_vs_i16m8_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint32mf2_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint32m2_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint32m4_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_m(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint32m8_t vector, vint32m1_t scalar, - size_t vl) { - return vredxor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint64m2_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint64m4_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint64m8_t vector, vint64m1_t scalar, - size_t vl) { - return vredxor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, - vuint8mf8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, - vuint8mf4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, - vuint8mf2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, - vuint8m2_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, - vuint8m4_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, - vuint8m8_t vector, vuint8m1_t scalar, - size_t vl) { - return vredxor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t 
mask, vuint16m1_t dst, - vuint16mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint16mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint16m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint16m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1_m(mask, 
dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint16m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint32mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint32m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, 
vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint32m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint32m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint64m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint64m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint64m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu (vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu (vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu (vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_tu (vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_tu (vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_tu (vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_tu (vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu (vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu (vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_tu (vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_tu (vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_tu (vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return 
vredxor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_tu (vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu (vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_tu (vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_ta(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_tu (vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_ta(vector, scalar, vl); 
+vint32m1_t test_vredxor_vs_i32m4_i32m1_tu (vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum( +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_tu (vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_tu (vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_tu (vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_tu (vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_tu (vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu (vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu (vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu (vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu (vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu (vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu (vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu (vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu (vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_u16m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu (vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu (vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu (vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu (vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } -// CHECK-RV64-LABEL: 
@test_vredxor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu (vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu (vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu (vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu (vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu (vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu (vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return 
vredxor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_ta (vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_ta (vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_ta (vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_ta (vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_ta (vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_ta (vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_ta (vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_ta (vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_ta (vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_ta (vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_ta (vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_ta (vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_ta (vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta (vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_ta (vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_ta (vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_ta (vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_i32m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_ta (vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_ta (vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_ta (vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_ta (vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_ta (vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_ta (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_ta (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_ta (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vredxor_vs_u8m1_u8m1_ta (vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_ta (vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_ta (vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_ta (vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_ta (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_ta (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_ta (vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_ta (vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_ta (vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_ta (vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_ta (vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_ta (vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_ta (vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_ta (vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_ta (vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_ta (vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vredxor_vs_u64m4_u64m1_ta (vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_ta (vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum (vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum (vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum (vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_tum (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_tum (vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_tum (vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_i8m8_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_tum (vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum (vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum (vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum (vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum (vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum (vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum (vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum (vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tam (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tam (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tam (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_tam (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_tam (vbool4_t mask, vint8m2_t vector, 
vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_tam (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_tam (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tam (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tam (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_tam (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_tam (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_tam (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_tam (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_tam (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_tam (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_tam (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_tam (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_tam (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_tam (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_tam (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_tam (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tam (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tam (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tam (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vredxor_vs_u8m8_u8m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tam (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tam (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tam (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tam (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tam (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam (vbool64_t mask, 
vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tam (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tam (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tam (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tam (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tam (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tam (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tam (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tam (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1_tam(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c @@ -6,776 +6,1091 @@ // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1 (vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1 (vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1 (vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1 (vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1 (vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1 (vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector, - vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1(dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1 (vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1 (vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1 (vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1 (vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1 (vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1 (vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1(dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1 (vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1 (vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1 (vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1 (vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1 (vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1(dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1 (vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1 (vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1 (vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1 (vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1 (vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1 (vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1 (vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1(dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1 (vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1 (vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1 (vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1 (vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1 
(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1 (vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1(dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1 (vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1 (vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1 (vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector, - vuint64m1_t scalar, size_t 
vl) { - return vwredsumu_vs_u32m2_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1 (vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1 (vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1(dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1 (vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst, - vint8mf8_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8mf8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m (vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst, - vint8mf4_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8mf4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m (vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint8mf2_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8mf2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst, - vint8m1_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8m1_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m (vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst, - vint8m2_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8m2_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m (vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst, - vint8m4_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8m4_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m (vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst, - vint8m8_t vector, vint16m1_t scalar, - size_t vl) { - return vwredsum_vs_i8m8_i16m1_m(mask, dst, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m (vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst, - vint16mf4_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m (vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint16mf2_t vector, - vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst, - vint16m1_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum_vs_i16m1_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m (vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t 
test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst, - vint16m2_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum_vs_i16m2_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m (vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst, - vint16m4_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum_vs_i16m4_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m (vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst, - vint16m8_t vector, vint32m1_t scalar, - size_t vl) { - return vwredsum_vs_i16m8_i32m1_m(mask, dst, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m (vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint32mf2_t vector, - vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst, - vint32m1_t vector, vint64m1_t scalar, - size_t vl) { - return 
vwredsum_vs_i32m1_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m (vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst, - vint32m2_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum_vs_i32m2_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m (vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst, - vint32m4_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum_vs_i32m4_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m (vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst, - vint32m8_t vector, vint64m1_t scalar, - size_t vl) { - return vwredsum_vs_i32m8_i64m1_m(mask, dst, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m (vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst, - vuint8mf8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m (vbool64_t mask, vuint16m1_t 
maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst, - vuint8mf4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m (vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint8mf2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst, - vuint8m1_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m (vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst, - vuint8m2_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m (vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return 
vwredsumu_vs_u8m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst, - vuint8m4_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m (vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst, - vuint8m8_t vector, - vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1_m(mask, dst, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m (vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst, - vuint16mf4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m (vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint16mf2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst, - vuint16m1_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m (vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst, - vuint16m2_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m (vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst, - vuint16m4_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m (vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst, - vuint16m8_t vector, - vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1_m(mask, dst, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m (vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint32mf2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst, - vuint32m1_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m (vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst, - vuint32m2_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m (vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst, - vuint32m4_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m (vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst, - vuint32m8_t vector, - vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1_m(mask, dst, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m (vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu (vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu (vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu (vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu (vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu (vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu (vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vwredsum_vs_i8m8_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu (vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu (vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu (vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu (vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu (vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu (vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu (vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu (vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu (vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu (vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu (vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_tu(merge, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu (vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu (vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu (vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vwredsumu_vs_u8mf2_u16m1_tu (vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu (vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu (vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu (vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu (vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu (vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu (vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu (vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu (vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu (vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu (vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu (vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu (vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu (vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu (vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_tu(merge, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu (vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_tu(maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_ta (vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_ta (vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_ta (vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_ta (vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_ta (vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_ta (vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_ta (vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_ta (vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_ta (vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_ta (vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_ta (vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_ta (vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_ta (vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_ta(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta( @@ -783,51 +1098,852 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) { +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta (vint32mf2_t vector, vint64m1_t scalar, size_t vl) { return vwredsum_vs_i32mf2_i64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta( +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_ta(vector, scalar, vl); +vint64m1_t 
test_vwredsum_vs_i32m1_i64m1_ta (vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum( +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_tum(mask, merge, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_ta (vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum( +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_tum(mask, merge, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_ta (vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam( +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_tam(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_ta (vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_ta(vector, scalar, vl); } -// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam( +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_tam(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_ta (vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
@test_vwredsumu_vs_u8mf4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_ta (vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_ta (vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_ta (vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_ta (vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_ta (vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_ta (vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_ta (vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_ta (vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_ta (vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_ta (vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_ta (vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_ta (vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta (vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_ta (vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_ta (vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_ta (vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_ta 
(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_ta(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum (vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum (vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum (vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum (vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum (vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum (vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, 
size_t vl) { + return vwredsum_vs_i8m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum (vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum (vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum (vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum (vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum (vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t 
scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum (vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum (vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum (vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum (vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum (vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum (vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t 
vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum (vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum (vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum (vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum (vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum (vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum (vbool16_t 
mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum (vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum (vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum (vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum (vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum (vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum (vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum (vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tam (vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tam (vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tam (vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tam (vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tam (vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tam (vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tam( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tam (vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tam (vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tam (vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tam (vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tam (vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tam (vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tam (vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam (vbool64_t mask, vint32mf2_t vector, 
vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tam (vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tam (vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tam (vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tam (vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tam (vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tam (vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tam (vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tam (vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tam (vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tam (vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tam (vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tam (vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tam (vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tam (vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tam (vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_tam(mask, vector, scalar, 
vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tam (vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tam (vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam (vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tam (vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tam (vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tam (vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_tam(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tam( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tam (vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_tam(mask, vector, scalar, vl); }