diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -297,6 +297,11 @@
                                list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet;
+// IntrinsicTypes is output, op1 [-1, 0]
+multiclass RVVOutOp0BuiltinSet<string intrinsic_name, string type_range,
+                               list<list<string>> suffixes_prototypes>
+    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 0]>;
+
 // IntrinsicTypes is output, op1 [-1, 1]
 multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
                                list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
@@ -496,22 +501,22 @@
 class RVVConvToNarrowingUnsignedBuiltin<string overloaded_name>
     : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>;
 
-let HasMaskedOffOperand = false in {
+let HasMaskedOffOperand = true in {
 multiclass RVVSignedReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 multiclass RVVUnsignedReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 multiclass RVVFloatingReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 multiclass RVVFloatingWidenReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
+  defm "" : RVVOutOp0BuiltinSet;
 }
 }
@@ -2079,7 +2084,7 @@
 // 15.1. Vector Single-Width Integer Reduction Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = HasPassthruOperand,
-    IsPrototypeDefaultTU = true,
+    IsPrototypeDefaultTU = false,
     HasMaskPolicy = false in {
 defm vredsum : RVVIntReductionBuiltinSet;
 defm vredmaxu : RVVUnsignedReductionBuiltin;
@@ -2092,11 +2097,11 @@
 // 15.2. Vector Widening Integer Reduction Instructions
 // Vector Widening Integer Reduction Operations
-let HasMaskedOffOperand = false in {
-  defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
-                                      [["vs", "vSw", "SwSwvSw"]]>;
-  defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
-                                       [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+let HasMaskedOffOperand = true in {
+  defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi",
+                                      [["vs", "vSw", "SwvSw"]]>;
+  defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi",
+                                       [["vs", "UvUSw", "USwUvUSw"]]>;
 }
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
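A quick sketch of what the TableGen change above means for the generated C intrinsics; this note and snippet are not taken from the patch itself, the helper names reduce_max_f32 and reduce_max_f32_m are made up, and they assume a toolchain that already carries this change. With HasMaskedOffOperand = true and IsPrototypeDefaultTU = false, the unmasked reduction builtins drop their explicit dest argument and lower with a poison passthru operand, while the masked forms keep a merge operand that is now spelled maskedoff and sits right after the mask. The signatures mirror the ones in the updated vfredmax tests below:

  #include <riscv_vector.h>

  // Unmasked form: no dest operand any more; the llvm.riscv.vfredmax call
  // is emitted with a poison passthru (see the updated CHECK lines below).
  vfloat32m1_t reduce_max_f32(vfloat32m4_t vector, vfloat32m1_t scalar,
                              size_t vl) {
    return vfredmax_vs_f32m4_f32m1(vector, scalar, vl);
  }

  // Masked form: the former dest parameter is renamed to maskedoff and
  // still precedes the vector and scalar operands.
  vfloat32m1_t reduce_max_f32_m(vbool8_t mask, vfloat32m1_t maskedoff,
                                vfloat32m4_t vector, vfloat32m1_t scalar,
                                size_t vl) {
    return vfredmax_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
  }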
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
@@ -9,271 +9,271 @@
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m1_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t 
vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, 
size_t vl) { - return vfredmin_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t 
test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, 
vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return 
vfredmin_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfredosum_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return 
vfredosum_vs_f64m8_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t 
test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, 
vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, 
vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return 
vfredusum_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredusum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c @@ -9,199 +9,199 @@ // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1(dest, vector, scalar, 
vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c @@ -9,199 +9,199 @@ 
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return 
vfwredusum_vs_f16m4_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1_m(mask, dest, 
vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_m(mask, dest, vector, scalar, vl); 
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl); +vfloat64m1_t 
test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DEST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DEST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return 
vredand_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t 
scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t 
test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return 
vredand_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t 
vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t 
scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1(dest, vector, scalar, vl); +vuint8m1_t 
test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1(dest, vector, scalar, vl); 
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, 
size_t vl) { - return vredmaxu_vs_u32mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, 
vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl); 
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, 
vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t 
test_vredmin_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return 
vredmin_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t 
scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredminu_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredminu_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t 
test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - 
return vredminu_vs_u32m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredminu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t 
scalar, size_t vl) { - return vredor_vs_i8m8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return 
vredor_vs_i16m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1(dest, vector, 
scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1(dest, vector, scalar, vl); +vint64m1_t 
test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredor_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return 
vredor_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return 
vredor_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t 
scalar, size_t vl) { + return vredor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t 
test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t 
test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, 
size_t vl) { - return vredor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t 
test_vredsum_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t 
test_vredsum_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vredsum_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t 
test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1_m(mask, dest, vector, 
scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return 
vredsum_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl); 
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + 
return vredsum_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
@@ -8,793 +8,793 @@
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8mf8_i8m1(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8mf4_i8m1(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8mf2_i8m1(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t
test_vredxor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, 
vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t 
vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t 
vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t 
scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1(dest, 
vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - 
return vredxor_vs_u16m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, 
vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredxor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return 
vredxor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t 
mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t 
test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return 
vredxor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c @@ -8,325 +8,325 @@ // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dest, 
vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dest, 
vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t 
test_vwredsum_vs_i16m4_i32m1(vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return 
vwredsum_vs_i8mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1_m(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1_m(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1_m(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c @@ -8,325 +8,325 @@ // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1(dest, vector, scalar, vl); +vuint64m1_t 
test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1_m(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1_m(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1_m(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat32m1_t 
test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t 
vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, 
vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredosum_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredosum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredosum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, 
vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, 
vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c @@ -9,271 +9,271 @@ // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, 
vfloat16m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, 
vfloat16m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t 
scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, 
vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t 
mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredusum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c @@ -9,199 +9,199 @@ // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c @@ -9,199 +9,199 @@ // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, dest, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return 
vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return 
vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredand_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, 
vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t 
scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t 
test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint32m1_t 
test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, 
vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - 
return vredmax(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint64m1_t 
test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t 
maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, 
scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t 
scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmin_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DEST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t 
vector, vint64m1_t scalar, size_t vl) { - return vredmin(dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t 
test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return 
vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, 
size_t vl) { - return vredmin(mask, dest, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c @@ -8,397 +8,397 @@ // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t 
vector, vuint64m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, dest, 
vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredminu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, 
vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); 
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t 
dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return 
vredor(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return 
vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); 
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return 
vredor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, maskedoff, vector, scalar, vl); } diff --git 
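The tests above (vredor) and below (vredsum) all exercise the same call-site shape after this change: the unmasked reductions drop their leading dest argument and lower to an intrinsic call with a poison passthru, while the masked variants keep a leading vector operand that is now spelled maskedoff. A minimal sketch of both forms, assuming a riscv_vector.h generated with this patch applied; the wrapper functions below are illustrative only and are not part of the patch:

#include <riscv_vector.h>
#include <stddef.h>

// Unmasked reduction: no dest argument; the underlying intrinsic's
// passthru operand is poison, as the CHECK lines in these tests show.
vint32m1_t sum_i32m2(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(vector, scalar, vl);
}

// Masked reduction: the leading vint32m1_t operand formerly called dest
// is now the maskedoff operand, passed ahead of the source vector.
vint32m1_t or_i32m2_m(vbool16_t mask, vint32m1_t maskedoff,
                      vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return vredor(mask, maskedoff, vector, scalar, vl);
}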
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return 
vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vredsum_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint16m1_t 
test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t 
maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t 
test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, 
size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c @@ -8,793 +8,793 @@ // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint8m1_t 
test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( 
[[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - 
return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) 
{ + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t 
vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t 
test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint32m1_t 
test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(mask, dest, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c @@ -8,325 +8,325 @@ // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return 
vwredsum(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t 
scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, 
size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, 
vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, dest, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c @@ -8,325 +8,325 @@ // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); 
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t 
vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + 
return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) 
{ - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); 
} // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DEST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, dest, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, maskedoff, vector, scalar, vl); }
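For readers skimming the test churn above, the functional change is small: the unmasked widening integer reductions no longer take a dest/merge operand (the merge value in the IR becomes poison, i.e. the tail of the result is undefined), while the masked forms keep a merge value now spelled maskedoff. The sketch below is not part of the patch; it is an illustrative caller-side example written against the post-patch overloaded signature vwredsum(vector, scalar, vl), assuming RV64 with the V extension, a toolchain carrying this change, and the pre-__riscv_ intrinsic spellings used by these tests (vsetvl_e8m1, vle8_v_i8m1, vmv_v_x_i16m1, vmv_x_s_i16m1_i16 are assumed available under that naming).

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Widening sum of an int8 buffer into an int16 scalar, using the post-patch
 * unmasked form vwredsum(vector, scalar, vl) with no dest/merge operand.
 * Only element 0 of `acc` is read by the reduction and only element 0 of the
 * result is defined, so the poison tail is harmless here.  Int16 overflow is
 * ignored for brevity. */
static int16_t sum_i8_widening(const int8_t *src, size_t n) {
  vint16m1_t acc = vmv_v_x_i16m1(0, 1);      /* accumulator: element 0 = 0 */
  for (size_t vl; n > 0; n -= vl, src += vl) {
    vl = vsetvl_e8m1(n);                     /* elements handled this trip */
    vint8m1_t v = vle8_v_i8m1(src, vl);
    acc = vwredsum(v, acc, vl);              /* acc[0] += sum of v[0..vl) */
  }
  return vmv_x_s_i16m1_i16(acc);             /* extract element 0 */
}

A masked caller keeps a merge value, e.g. vwredsum(mask, maskedoff, v, acc, vl), matching the _m tests above.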