diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -137,8 +137,6 @@
 def NonePolicy : PolicyScheme<0>;
 def HasPassthruOperand : PolicyScheme<1>;
 def HasPolicyOperand : PolicyScheme<2>;
-// Specail case for passthru operand which is not a first opeand.
-def HasPassthruOperandAtIdx1 : PolicyScheme<3>;

 class RVVBuiltin {
@@ -1852,20 +1850,19 @@
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
 let HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert poison passthru
       if (PolicyAttrs == TAIL_AGNOSTIC)
         Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
     }] in {
  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
-                                   [["vvm", "v", "vmvv"],
-                                    ["vxm", "v", "vmve"],
-                                    ["vvm", "Uv", "UvmUvUv"],
-                                    ["vxm", "Uv", "UvmUvUe"]]>;
+                                   [["vvm", "v", "vvvm"],
+                                    ["vxm", "v", "vvem"],
+                                    ["vvm", "Uv", "UvUvUvm"],
+                                    ["vxm", "Uv", "UvUvUem"]]>;
 }

 // 12.16. Vector Integer Move Instructions
@@ -1996,19 +1993,18 @@
 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
 let HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert poison passthru
       if (PolicyAttrs == TAIL_AGNOSTIC)
         Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
     }] in {
  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
-                                   [["vvm", "v", "vmvv"]]>;
+                                   [["vvm", "v", "vvvm"]]>;
  defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
-                                    [["vfm", "v", "vmve"]]>;
+                                    [["vfm", "v", "vvem"]]>;
 }

 // 14.16. Vector Floating-Point Move Instruction
@@ -2196,10 +2192,9 @@
 // 17.5. Vector Compress Instruction
 let IsPrototypeDefaultTU = true,
     HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert poison passthru
       if (PolicyAttrs == TAIL_AGNOSTIC)
         Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
@@ -2207,10 +2202,10 @@
     }] in {
  // signed and floating type
  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
-                                   [["vm", "v", "vmvv"]]>;
+                                   [["vm", "v", "vvvm"]]>;
  // unsigned type
  defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
-                                   [["vm", "Uv", "UvmUvUv"]]>;
+                                   [["vm", "Uv", "UvUvUvm"]]>;
 }

 // Miscellaneous
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -366,9 +366,6 @@
   // Passthru operand is at first parameter in C builtin.
   HasPassthruOperand,
   HasPolicyOperand,
-  // Special case for vmerge, the passthru operand is second
-  // parameter in C builtin.
-  HasPassthruOperandAtIdx1,
 };

 // TODO refactor RVVIntrinsic class design after support all intrinsic
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -964,15 +964,6 @@
     else if (PolicyAttrs.isTAPolicy() && HasPassthruOp && IsPrototypeDefaultTU)
       NewPrototype.erase(NewPrototype.begin() + 1);
-    if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
-      if (PolicyAttrs.isTUPolicy() && !IsPrototypeDefaultTU) {
-        // Insert undisturbed output to index 1
-        NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
-      } else if (PolicyAttrs.isTAPolicy() && IsPrototypeDefaultTU) {
-        // Erase passthru for TA policy
-        NewPrototype.erase(NewPrototype.begin() + 2);
-      }
-    }
   } else if (PolicyAttrs.isTUPolicy() && HasPassthruOp) {
     // NF > 1 cases for segment load operations.
     // Convert
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
@@ -11,8 +11,8 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) {
-  return vcompress_vm_i8mf8(mask, dest, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i8mf8(dest, src, mask, vl);
 }

 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
@@ -20,8 +20,8 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) {
-  return vcompress_vm_i8mf4(mask, dest, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i8mf4(dest, src, mask, vl);
 }

 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
@@ -29,8 +29,8 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) {
-  return vcompress_vm_i8mf2(mask, dest, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i8mf2(dest, src, mask, vl);
 }

 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
@@ -38,8 +38,8 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) {
-  return vcompress_vm_i8m1(mask, dest, src, vl);
+vint8m1_t test_vcompress_vm_i8m1(vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i8m1(dest, src, mask, vl);
 }

 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
@@ -47,8 +47,8 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]],
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { - return vcompress_vm_i8m2(mask, dest, src, vl); +vint8m2_t test_vcompress_vm_i8m2(vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { - return vcompress_vm_i8m4(mask, dest, src, vl); +vint8m4_t test_vcompress_vm_i8m4(vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { - return vcompress_vm_i8m8(mask, dest, src, vl); +vint8m8_t test_vcompress_vm_i8m8(vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { - return vcompress_vm_i16mf4(mask, dest, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { - return vcompress_vm_i16mf2(mask, dest, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { - return vcompress_vm_i16m1(mask, dest, src, vl); +vint16m1_t test_vcompress_vm_i16m1(vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { - return vcompress_vm_i16m2(mask, dest, src, vl); +vint16m2_t test_vcompress_vm_i16m2(vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i16m2(dest, src, mask, vl); } // 
CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { - return vcompress_vm_i16m4(mask, dest, src, vl); +vint16m4_t test_vcompress_vm_i16m4(vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { - return vcompress_vm_i16m8(mask, dest, src, vl); +vint16m8_t test_vcompress_vm_i16m8(vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { - return vcompress_vm_i32mf2(mask, dest, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i32mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { - return vcompress_vm_i32m1(mask, dest, src, vl); +vint32m1_t test_vcompress_vm_i32m1(vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { - return vcompress_vm_i32m2(mask, dest, src, vl); +vint32m2_t test_vcompress_vm_i32m2(vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i32m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { - return vcompress_vm_i32m4(mask, dest, src, vl); +vint32m4_t test_vcompress_vm_i32m4(vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { - return 
vcompress_vm_i32m8(mask, dest, src, vl); +vint32m8_t test_vcompress_vm_i32m8(vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { - return vcompress_vm_i64m1(mask, dest, src, vl); +vint64m1_t test_vcompress_vm_i64m1(vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { - return vcompress_vm_i64m2(mask, dest, src, vl); +vint64m2_t test_vcompress_vm_i64m2(vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { - return vcompress_vm_i64m4(mask, dest, src, vl); +vint64m4_t test_vcompress_vm_i64m4(vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { - return vcompress_vm_i64m8(mask, dest, src, vl); +vint64m8_t test_vcompress_vm_i64m8(vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { - return vcompress_vm_u8mf8(mask, dest, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { - return vcompress_vm_u8mf4(mask, dest, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { - return vcompress_vm_u8mf2(mask, dest, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { - return vcompress_vm_u8m1(mask, dest, src, vl); +vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { - return vcompress_vm_u8m2(mask, dest, src, vl); +vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { - return vcompress_vm_u8m4(mask, dest, src, vl); +vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u8m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { - return vcompress_vm_u8m8(mask, dest, src, vl); +vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { - return vcompress_vm_u16mf4(mask, dest, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { - return vcompress_vm_u16mf2(mask, dest, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return 
vcompress_vm_u16mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { - return vcompress_vm_u16m1(mask, dest, src, vl); +vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { - return vcompress_vm_u16m2(mask, dest, src, vl); +vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { - return vcompress_vm_u16m4(mask, dest, src, vl); +vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { - return vcompress_vm_u16m8(mask, dest, src, vl); +vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { - return vcompress_vm_u32mf2(mask, dest, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { - return vcompress_vm_u32m1(mask, dest, src, vl); +vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { - return vcompress_vm_u32m2(mask, dest, src, vl); +vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { - return vcompress_vm_u32m4(mask, dest, src, vl); +vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { - return vcompress_vm_u32m8(mask, dest, src, vl); +vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { - return vcompress_vm_u64m1(mask, dest, src, vl); +vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { - return vcompress_vm_u64m2(mask, dest, src, vl); +vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u64m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { - return vcompress_vm_u64m4(mask, dest, src, vl); +vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { - return vcompress_vm_u64m8(mask, dest, src, vl); +vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: 
@test_vcompress_vm_f16mf4( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) { - return vcompress_vm_f16mf4(mask, dest, src, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) { - return vcompress_vm_f16mf2(mask, dest, src, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) { - return vcompress_vm_f16m1(mask, dest, src, vl); +vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) { - return vcompress_vm_f16m2(mask, dest, src, vl); +vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) { - return vcompress_vm_f16m4(mask, dest, src, vl); +vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) { - return vcompress_vm_f16m8(mask, dest, src, vl); +vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t 
mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2(mask, dest, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { - return vcompress_vm_f32m1(mask, dest, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { - return vcompress_vm_f32m2(mask, dest, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { - return vcompress_vm_f32m4(mask, dest, src, vl); +vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { - return vcompress_vm_f32m8(mask, dest, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { - return vcompress_vm_f64m1(mask, dest, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { - return vcompress_vm_f64m2(mask, dest, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2(dest, src, mask, vl); } // 
CHECK-RV64-LABEL: @test_vcompress_vm_f64m4( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { - return vcompress_vm_f64m4(mask, dest, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { - return vcompress_vm_f64m8(mask, dest, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8(dest, src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf4(mask, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf2(mask, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m1(mask, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m2(mask, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + 
return vfmerge_vfm_f16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m4(mask, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m8(mask, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_vfm_f16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32mf2(mask, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m1(mask, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m2(mask, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m4(mask, op1, op2, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t 
test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m8(mask, op1, op2, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m1(mask, op1, op2, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m2(mask, op1, op2, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m4(mask, op1, op2, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m8(mask, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f64m8(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmerge_vvm_i8mf8(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t 
test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf8(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmerge_vvm_i8mf4(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf4(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmerge_vvm_i8mf2(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf2(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmerge_vvm_i8m1(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m1(mask, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmerge_vvm_i8m2(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m2(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmerge_vvm_i8m4(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m4(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmerge_vvm_i8m8(mask, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m8(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmerge_vvm_i16mf4(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16mf4(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmerge_vvm_i16mf2(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16mf2(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmerge_vvm_i16m1(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m1(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmerge_vvm_i16m2(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m2(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, 
size_t vl) { + return vmerge_vxm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmerge_vvm_i16m4(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m4(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmerge_vvm_i16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32mf2(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t 
op1, vint32m1_t op2, size_t vl) { - return vmerge_vvm_i32m1(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m1(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmerge_vvm_i32m2(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m2(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmerge_vvm_i32m4(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m4(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmerge_vvm_i32m8(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], 
i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m8(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmerge_vvm_i64m1(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m1(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmerge_vvm_i64m2(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m2(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmerge_vvm_i64m4(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m4(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vmerge_vvm_i64m8( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmerge_vvm_i64m8(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m8(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmerge_vvm_u8mf8(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf8(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmerge_vvm_u8mf4(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf4(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmerge_vvm_u8mf2(mask, op1, op2, vl); +vuint8mf2_t 
test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf2(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmerge_vvm_u8m1(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m1(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmerge_vvm_u8m2(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m2(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmerge_vvm_u8m4(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, 
vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m4(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmerge_vvm_u8m8(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m8(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( @@ -533,8 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmerge_vvm_u16mf4(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( @@ -542,8 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf4(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( @@ -551,8 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmerge_vvm_u16mf2(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( @@ -560,8 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf2(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( @@ -569,8 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmerge_vvm_u16m1(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( @@ -578,8 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m1(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( @@ -587,8 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmerge_vvm_u16m2(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( @@ -596,8 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m2(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( @@ -605,8 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmerge_vvm_u16m4(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( @@ -614,8 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m4(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( @@ -623,8 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmerge_vvm_u16m8(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, 
vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( @@ -632,8 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m8(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( @@ -641,8 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( @@ -650,8 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( @@ -659,8 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmerge_vvm_u32m1(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( @@ -668,8 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m1(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( @@ -677,8 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmerge_vvm_u32m2(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( @@ -686,8 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m2(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( @@ -695,8 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmerge_vvm_u32m4(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( @@ -704,8 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m4(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( @@ -713,8 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmerge_vvm_u32m8(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( @@ -722,8 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m8(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( @@ -731,8 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmerge_vvm_u64m1(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( @@ -740,8 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m1(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( @@ -749,8 +749,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmerge_vvm_u64m2(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( @@ -758,8 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m2(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( @@ -767,8 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmerge_vvm_u64m4(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( @@ -776,8 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m4(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( @@ -785,8 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmerge_vvm_u64m8(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( @@ -794,8 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m8(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( @@ -803,8 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_vvm_f16mf4(mask, op1, op2, vl); +vfloat16mf4_t 
test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( @@ -812,8 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_vvm_f16mf2(mask, op1, op2, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( @@ -821,8 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_vvm_f16m1(mask, op1, op2, vl); +vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( @@ -830,8 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge_vvm_f16m2(mask, op1, op2, vl); +vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( @@ -839,8 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_vvm_f16m4(mask, op1, op2, vl); +vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( @@ -848,8 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_vvm_f16m8(mask, op1, op2, vl); +vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_f16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( @@ -857,8 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( @@ -866,8 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmerge_vvm_f32m1(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( @@ -875,8 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmerge_vvm_f32m2(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( @@ -884,8 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmerge_vvm_f32m4(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( @@ -893,8 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmerge_vvm_f32m8(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( @@ -902,8 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmerge_vvm_f64m1(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( @@ -911,8 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmerge_vvm_f64m2(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( @@ -920,8 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmerge_vvm_f64m4(mask, op1, op2, vl); 
+vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( @@ -929,7 +929,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmerge_vvm_f64m8(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m1_t test_vcompress_vm_i8m1(vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m2_t test_vcompress_vm_i8m2(vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, 
vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m4_t test_vcompress_vm_i8m4(vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m8_t test_vcompress_vm_i8m8(vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m1_t test_vcompress_vm_i16m1(vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m2_t test_vcompress_vm_i16m2(vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m4_t test_vcompress_vm_i16m4(vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) { + 
return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m8_t test_vcompress_vm_i16m8(vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m1_t test_vcompress_vm_i32m1(vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m2_t test_vcompress_vm_i32m2(vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m4_t test_vcompress_vm_i32m4(vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m8_t test_vcompress_vm_i32m8(vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m1_t 
test_vcompress_vm_i64m1(vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m2_t test_vcompress_vm_i64m2(vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m4_t test_vcompress_vm_i64m4(vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m8_t test_vcompress_vm_i64m8(vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t 
vl) { - return vcompress(mask, dest, src, vl); +vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t 
test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( 
[[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: 
@test_vcompress_vm_f16m1( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m2_t 
test_vcompress_vm_f32m2(vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t 
test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfmerge(mask, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, 
vl); +vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, 
size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, 
vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, 
vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t 
op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t 
op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf4_t 
test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m2_t 
test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( @@ -533,8 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( @@ -542,8 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( @@ -551,8 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); 
+vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( @@ -560,8 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( @@ -569,8 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( @@ -578,8 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( @@ -587,8 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( @@ -596,8 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( @@ -605,8 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( @@ -614,8 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, 
size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( @@ -623,8 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( @@ -632,8 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( @@ -641,8 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( @@ -650,8 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( @@ -659,8 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( @@ -668,8 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( @@ -677,8 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( @@ -686,8 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( @@ -695,8 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( @@ -704,8 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( @@ -713,8 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( @@ -722,8 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( @@ -731,8 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( @@ -740,8 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( @@ -749,8 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( @@ -758,8 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( @@ -767,8 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( @@ -776,8 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( @@ -785,8 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( @@ -794,8 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( @@ -803,8 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( @@ -812,8 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( @@ -821,8 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( @@ -830,8 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( @@ -839,8 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( @@ -848,8 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( @@ -857,8 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vmerge_vvm_f32m1( @@ -866,8 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( @@ -875,8 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( @@ -884,8 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( @@ -893,8 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( @@ -902,8 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( @@ -911,8 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( @@ -920,8 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, 
vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( @@ -929,7 +929,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) { - return vcompress_vm_i8mf8_tu(mask, maskedoff, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i8mf8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) { - return vcompress_vm_i8mf4_tu(mask, maskedoff, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i8mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) { - return vcompress_vm_i8mf2_tu(mask, maskedoff, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i8mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t vl) { - return vcompress_vm_i8m1_tu(mask, maskedoff, src, vl); +vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i8m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t vl) { - return 
vcompress_vm_i8m2_tu(mask, maskedoff, src, vl); +vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t vl) { - return vcompress_vm_i8m4_tu(mask, maskedoff, src, vl); +vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t vl) { - return vcompress_vm_i8m8_tu(mask, maskedoff, src, vl); +vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vcompress_vm_i16mf4_tu(mask, maskedoff, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vcompress_vm_i16mf2_tu(mask, maskedoff, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vcompress_vm_i16m1_tu(mask, maskedoff, src, vl); +vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vcompress_vm_i16m2_tu(mask, maskedoff, src, vl); +vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t 
mask, size_t vl) { + return vcompress_vm_i16m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vcompress_vm_i16m4_tu(mask, maskedoff, src, vl); +vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vcompress_vm_i16m8_tu(mask, maskedoff, src, vl); +vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vcompress_vm_i32mf2_tu(mask, maskedoff, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i32mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vcompress_vm_i32m1_tu(mask, maskedoff, src, vl); +vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vcompress_vm_i32m2_tu(mask, maskedoff, src, vl); +vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i32m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vcompress_vm_i32m4_tu(mask, maskedoff, src, vl); +vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: 
@test_vcompress_vm_i32m8_tu( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vcompress_vm_i32m8_tu(mask, maskedoff, src, vl); +vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vcompress_vm_i64m1_tu(mask, maskedoff, src, vl); +vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vcompress_vm_i64m2_tu(mask, maskedoff, src, vl); +vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vcompress_vm_i64m4_tu(mask, maskedoff, src, vl); +vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vcompress_vm_i64m8_tu(mask, maskedoff, src, vl); +vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) { - return vcompress_vm_u8mf8_tu(mask, maskedoff, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcompress.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) { - return vcompress_vm_u8mf4_tu(mask, maskedoff, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) { - return vcompress_vm_u8mf2_tu(mask, maskedoff, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) { - return vcompress_vm_u8m1_tu(mask, maskedoff, src, vl); +vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) { - return vcompress_vm_u8m2_tu(mask, maskedoff, src, vl); +vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) { - return vcompress_vm_u8m4_tu(mask, maskedoff, src, vl); +vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u8m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) { - return vcompress_vm_u8m8_tu(mask, maskedoff, src, vl); +vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf4_t test_vcompress_vm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vcompress_vm_u16mf4_tu(mask, maskedoff, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vcompress_vm_u16mf2_tu(mask, maskedoff, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u16mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vcompress_vm_u16m1_tu(mask, maskedoff, src, vl); +vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vcompress_vm_u16m2_tu(mask, maskedoff, src, vl); +vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vcompress_vm_u16m4_tu(mask, maskedoff, src, vl); +vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vcompress_vm_u16m8_tu(mask, maskedoff, src, vl); +vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vcompress_vm_u32mf2_tu(mask, maskedoff, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vcompress_vm_u32m1_tu(mask, maskedoff, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vcompress_vm_u32m2_tu(mask, maskedoff, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vcompress_vm_u32m4_tu(mask, maskedoff, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vcompress_vm_u32m8_tu(mask, maskedoff, src, vl); +vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vcompress_vm_u64m1_tu(mask, maskedoff, src, vl); +vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return 
vcompress_vm_u64m2_tu(mask, maskedoff, src, vl); +vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u64m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vcompress_vm_u64m4_tu(mask, maskedoff, src, vl); +vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vcompress_vm_u64m8_tu(mask, maskedoff, src, vl); +vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_tu( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vcompress_vm_f16mf4_tu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vcompress_vm_f16mf2_tu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vcompress_vm_f16m1_tu(mask, maskedoff, src, vl); +vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vcompress_vm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vcompress_vm_f16m2_tu(mask, maskedoff, 
src, vl); +vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vcompress_vm_f16m4_tu(mask, maskedoff, src, vl); +vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vcompress_vm_f16m8_tu(mask, maskedoff, src, vl); +vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2_tu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vcompress_vm_f32m1_tu(mask, maskedoff, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vcompress_vm_f32m2_tu(mask, maskedoff, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vcompress_vm_f32m4_tu(mask, maskedoff, src, vl); +vfloat32m4_t 
test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vcompress_vm_f32m8_tu(mask, maskedoff, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vcompress_vm_f64m1_tu(mask, maskedoff, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vcompress_vm_f64m2_tu(mask, maskedoff, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vcompress_vm_f64m4_tu(mask, maskedoff, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( @@ -533,8 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vcompress_vm_f64m8_tu(mask, maskedoff, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta( @@ -542,8 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8_ta(vbool64_t mask, vint8mf8_t src, size_t vl) { - return vcompress_vm_i8mf8_ta(mask, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8_ta(vint8mf8_t src, vbool64_t mask, size_t vl) { + return 
vcompress_vm_i8mf8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta( @@ -551,8 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4_ta(vbool32_t mask, vint8mf4_t src, size_t vl) { - return vcompress_vm_i8mf4_ta(mask, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4_ta(vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i8mf4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta( @@ -560,8 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2_ta(vbool16_t mask, vint8mf2_t src, size_t vl) { - return vcompress_vm_i8mf2_ta(mask, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2_ta(vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i8mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta( @@ -569,8 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1_ta(vbool8_t mask, vint8m1_t src, size_t vl) { - return vcompress_vm_i8m1_ta(mask, src, vl); +vint8m1_t test_vcompress_vm_i8m1_ta(vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i8m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta( @@ -578,8 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2_ta(vbool4_t mask, vint8m2_t src, size_t vl) { - return vcompress_vm_i8m2_ta(mask, src, vl); +vint8m2_t test_vcompress_vm_i8m2_ta(vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta( @@ -587,8 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4_ta(vbool2_t mask, vint8m4_t src, size_t vl) { - return vcompress_vm_i8m4_ta(mask, src, vl); +vint8m4_t test_vcompress_vm_i8m4_ta(vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta( @@ -596,8 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8_ta(vbool1_t mask, vint8m8_t src, size_t vl) { - return vcompress_vm_i8m8_ta(mask, src, vl); +vint8m8_t test_vcompress_vm_i8m8_ta(vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta( @@ -605,8 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4_ta(vbool64_t mask, vint16mf4_t src, size_t vl) { - return vcompress_vm_i16mf4_ta(mask, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4_ta(vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta( @@ -614,8 +614,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2_ta(vbool32_t mask, vint16mf2_t src, size_t vl) { - return vcompress_vm_i16mf2_ta(mask, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2_ta(vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta( @@ -623,8 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1_ta(vbool16_t mask, vint16m1_t src, size_t vl) { - return vcompress_vm_i16m1_ta(mask, src, vl); +vint16m1_t test_vcompress_vm_i16m1_ta(vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta( @@ -632,8 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2_ta(vbool8_t mask, vint16m2_t src, size_t vl) { - return vcompress_vm_i16m2_ta(mask, src, vl); +vint16m2_t test_vcompress_vm_i16m2_ta(vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i16m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta( @@ -641,8 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4_ta(vbool4_t mask, vint16m4_t src, size_t vl) { - return vcompress_vm_i16m4_ta(mask, src, vl); +vint16m4_t test_vcompress_vm_i16m4_ta(vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta( @@ -650,8 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8_ta(vbool2_t mask, vint16m8_t src, size_t vl) { - return vcompress_vm_i16m8_ta(mask, src, vl); +vint16m8_t test_vcompress_vm_i16m8_ta(vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( @@ -659,8 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vcompress_vm_i32mf2_ta(mask, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_ta(vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i32mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta( @@ -668,8 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1_ta(vbool32_t mask, vint32m1_t src, size_t vl) { - return vcompress_vm_i32m1_ta(mask, src, vl); +vint32m1_t test_vcompress_vm_i32m1_ta(vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta( @@ -677,8 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2_ta(vbool16_t mask, vint32m2_t src, size_t vl) { - return vcompress_vm_i32m2_ta(mask, src, vl); +vint32m2_t test_vcompress_vm_i32m2_ta(vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i32m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta( @@ -686,8 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4_ta(vbool8_t mask, vint32m4_t src, size_t vl) { - return vcompress_vm_i32m4_ta(mask, src, vl); +vint32m4_t test_vcompress_vm_i32m4_ta(vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta( @@ -695,8 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8_ta(vbool4_t mask, vint32m8_t src, size_t vl) { - return vcompress_vm_i32m8_ta(mask, src, vl); +vint32m8_t test_vcompress_vm_i32m8_ta(vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta( @@ -704,8 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1_ta(vbool64_t mask, vint64m1_t src, size_t vl) { - return vcompress_vm_i64m1_ta(mask, src, vl); +vint64m1_t test_vcompress_vm_i64m1_ta(vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta( @@ -713,8 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2_ta(vbool32_t mask, vint64m2_t src, size_t vl) { - return vcompress_vm_i64m2_ta(mask, src, vl); +vint64m2_t test_vcompress_vm_i64m2_ta(vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta( @@ -722,8 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4_ta(vbool16_t mask, vint64m4_t src, size_t vl) { - return vcompress_vm_i64m4_ta(mask, src, vl); +vint64m4_t test_vcompress_vm_i64m4_ta(vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta( @@ -731,8 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8_ta(vbool8_t mask, vint64m8_t src, size_t vl) { - return vcompress_vm_i64m8_ta(mask, src, vl); +vint64m8_t test_vcompress_vm_i64m8_ta(vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta( @@ -740,8 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8_ta(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return vcompress_vm_u8mf8_ta(mask, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8_ta(vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta( @@ -749,8 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4_ta(vbool32_t mask, vuint8mf4_t src, size_t vl) { - return vcompress_vm_u8mf4_ta(mask, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4_ta(vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta( @@ -758,8 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2_ta(vbool16_t mask, vuint8mf2_t src, size_t vl) { - return vcompress_vm_u8mf2_ta(mask, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2_ta(vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta( @@ -767,8 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1_ta(vbool8_t mask, vuint8m1_t src, size_t vl) { - return vcompress_vm_u8m1_ta(mask, src, vl); +vuint8m1_t test_vcompress_vm_u8m1_ta(vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta( @@ -776,8 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2_ta(vbool4_t mask, vuint8m2_t src, size_t vl) { - return vcompress_vm_u8m2_ta(mask, src, vl); +vuint8m2_t test_vcompress_vm_u8m2_ta(vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta( @@ -785,8 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4_ta(vbool2_t mask, vuint8m4_t src, size_t vl) { - return vcompress_vm_u8m4_ta(mask, src, vl); +vuint8m4_t test_vcompress_vm_u8m4_ta(vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u8m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta( @@ -794,8 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8_ta(vbool1_t mask, vuint8m8_t src, size_t vl) { - return vcompress_vm_u8m8_ta(mask, src, vl); +vuint8m8_t test_vcompress_vm_u8m8_ta(vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta( @@ -803,8 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4_ta(vbool64_t mask, 
vuint16mf4_t src, size_t vl) { - return vcompress_vm_u16mf4_ta(mask, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4_ta(vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta( @@ -812,8 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2_ta(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return vcompress_vm_u16mf2_ta(mask, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2_ta(vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u16mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta( @@ -821,8 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1_ta(vbool16_t mask, vuint16m1_t src, size_t vl) { - return vcompress_vm_u16m1_ta(mask, src, vl); +vuint16m1_t test_vcompress_vm_u16m1_ta(vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta( @@ -830,8 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2_ta(vbool8_t mask, vuint16m2_t src, size_t vl) { - return vcompress_vm_u16m2_ta(mask, src, vl); +vuint16m2_t test_vcompress_vm_u16m2_ta(vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta( @@ -839,8 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4_ta(vbool4_t mask, vuint16m4_t src, size_t vl) { - return vcompress_vm_u16m4_ta(mask, src, vl); +vuint16m4_t test_vcompress_vm_u16m4_ta(vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta( @@ -848,8 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8_ta(vbool2_t mask, vuint16m8_t src, size_t vl) { - return vcompress_vm_u16m8_ta(mask, src, vl); +vuint16m8_t test_vcompress_vm_u16m8_ta(vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( @@ -857,8 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vcompress_vm_u32mf2_ta(mask, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_ta(vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta( @@ -866,8 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1_ta(vbool32_t mask, vuint32m1_t src, size_t vl) { - 
return vcompress_vm_u32m1_ta(mask, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_ta(vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta( @@ -875,8 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2_ta(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vcompress_vm_u32m2_ta(mask, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_ta(vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta( @@ -884,8 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4_ta(vbool8_t mask, vuint32m4_t src, size_t vl) { - return vcompress_vm_u32m4_ta(mask, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_ta(vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta( @@ -893,8 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8_ta(vbool4_t mask, vuint32m8_t src, size_t vl) { - return vcompress_vm_u32m8_ta(mask, src, vl); +vuint32m8_t test_vcompress_vm_u32m8_ta(vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta( @@ -902,8 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1_ta(vbool64_t mask, vuint64m1_t src, size_t vl) { - return vcompress_vm_u64m1_ta(mask, src, vl); +vuint64m1_t test_vcompress_vm_u64m1_ta(vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta( @@ -911,8 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2_ta(vbool32_t mask, vuint64m2_t src, size_t vl) { - return vcompress_vm_u64m2_ta(mask, src, vl); +vuint64m2_t test_vcompress_vm_u64m2_ta(vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u64m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta( @@ -920,8 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4_ta(vbool16_t mask, vuint64m4_t src, size_t vl) { - return vcompress_vm_u64m4_ta(mask, src, vl); +vuint64m4_t test_vcompress_vm_u64m4_ta(vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta( @@ -929,8 +929,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8_ta(vbool8_t mask, vuint64m8_t src, size_t vl) { - return vcompress_vm_u64m8_ta(mask, src, vl); +vuint64m8_t 
test_vcompress_vm_u64m8_ta(vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta( @@ -938,8 +938,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vcompress_vm_f16mf4_ta(mask, src, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta( @@ -947,8 +947,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vcompress_vm_f16mf2_ta(mask, src, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta( @@ -956,8 +956,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1_ta(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vcompress_vm_f16m1_ta(mask, src, vl); +vfloat16m1_t test_vcompress_vm_f16m1_ta(vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta( @@ -965,8 +965,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vcompress_vm_f16m2_ta(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vcompress_vm_f16m2_ta(mask, src, vl); +vfloat16m2_t test_vcompress_vm_f16m2_ta(vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta( @@ -974,8 +974,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4_ta(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vcompress_vm_f16m4_ta(mask, src, vl); +vfloat16m4_t test_vcompress_vm_f16m4_ta(vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta( @@ -983,8 +983,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8_ta(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vcompress_vm_f16m8_ta(mask, src, vl); +vfloat16m8_t test_vcompress_vm_f16m8_ta(vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( @@ -992,8 +992,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2_ta(mask, src, vl); +vfloat32mf2_t 
test_vcompress_vm_f32mf2_ta(vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta( @@ -1001,8 +1001,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1_ta(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vcompress_vm_f32m1_ta(mask, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1_ta(vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta( @@ -1010,8 +1010,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2_ta(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vcompress_vm_f32m2_ta(mask, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2_ta(vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta( @@ -1019,8 +1019,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4_ta(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vcompress_vm_f32m4_ta(mask, src, vl); +vfloat32m4_t test_vcompress_vm_f32m4_ta(vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta( @@ -1028,8 +1028,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8_ta(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vcompress_vm_f32m8_ta(mask, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8_ta(vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta( @@ -1037,8 +1037,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1_ta(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vcompress_vm_f64m1_ta(mask, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1_ta(vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta( @@ -1046,8 +1046,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2_ta(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vcompress_vm_f64m2_ta(mask, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2_ta(vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta( @@ -1055,8 +1055,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4_ta(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vcompress_vm_f64m4_ta(mask, src, vl); +vfloat64m4_t 
test_vcompress_vm_f64m4_ta(vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta( @@ -1064,7 +1064,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8_ta(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vcompress_vm_f64m8_ta(mask, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8_ta(vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8_ta(src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf4_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf2_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m1_tu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m2_tu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vfmerge_vfm_f16m4_tu( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m4_tu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m8_tu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_vfm_f16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32mf2_tu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m1_tu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m2_tu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return 
vfmerge_vfm_f32m4_tu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m8_tu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m1_tu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m2_tu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m4_tu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m8_tu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_ta( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf4_ta(mask, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f16mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_ta( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf2_ta(mask, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f16mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_ta( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m1_ta(mask, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f16m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_ta( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m2_ta(mask, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f16m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_ta( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m4_ta(mask, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f16m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_ta( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m8_ta(mask, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_vfm_f16m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t 
mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32mf2_ta(mask, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f32mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_ta( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m1_ta(mask, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f32m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_ta( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m2_ta(mask, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f32m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_ta( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m4_ta(mask, op1, op2, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f32m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_ta( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32m8_ta(mask, op1, op2, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f32m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_ta( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m1_ta(mask, op1, op2, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f64m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_ta( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m2_ta(mask, op1, op2, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f64m2_ta(op1, 
op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_ta( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m4_ta(mask, op1, op2, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f64m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_ta( @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfmerge_vfm_f64m8_ta(mask, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f64m8_ta(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmerge_vvm_i8mf8_tu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf8_tu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmerge_vvm_i8mf4_tu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8mf4_t test_vmerge_vxm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf4_tu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmerge_vvm_i8mf2_tu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf2_tu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmerge_vvm_i8m1_tu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m1_tu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmerge_vvm_i8m2_tu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m2_tu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmerge_vvm_i8m4_tu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m4_tu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmerge_vvm_i8m8_tu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m8_tu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmerge_vvm_i16mf4_tu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu( @@ -146,8 
+146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16mf4_tu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmerge_vvm_i16mf2_tu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16mf2_tu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmerge_vvm_i16m1_tu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m1_tu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmerge_vvm_i16m2_tu(mask, maskedoff, op1, op2, vl); +vint16m2_t 
test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m2_tu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmerge_vvm_i16m4_tu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m4_tu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmerge_vvm_i16m8_tu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m8_tu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t 
test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2_tu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32mf2_tu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmerge_vvm_i32m1_tu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m1_tu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmerge_vvm_i32m2_tu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m2_tu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu( @@ -299,8 +299,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmerge_vvm_i32m4_tu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m4_tu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmerge_vvm_i32m8_tu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m8_tu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmerge_vvm_i64m1_tu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m1_tu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, 
vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmerge_vvm_i64m2_tu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m2_tu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmerge_vvm_i64m4_tu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m4_tu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmerge_vvm_i64m8_tu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, 
size_t vl) { - return vmerge_vxm_i64m8_tu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmerge_vvm_u8mf8_tu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf8_tu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmerge_vvm_u8mf4_tu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf4_tu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmerge_vvm_u8mf2_tu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf2_tu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmerge_vvm_u8m1_tu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m1_tu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmerge_vvm_u8m2_tu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m2_tu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmerge_vvm_u8m4_tu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu( 
@@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m4_tu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmerge_vvm_u8m8_tu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_u8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m8_tu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu( @@ -533,8 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmerge_vvm_u16mf4_tu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu( @@ -542,8 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf4_tu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu( @@ -551,8 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmerge_vvm_u16mf2_tu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t 
test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu( @@ -560,8 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf2_tu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu( @@ -569,8 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmerge_vvm_u16m1_tu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu( @@ -578,8 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m1_tu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu( @@ -587,8 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmerge_vvm_u16m2_tu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu( @@ -596,8 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m2_tu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu( @@ -605,8 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmerge_vvm_u16m4_tu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu( @@ -614,8 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m4_tu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu( @@ -623,8 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmerge_vvm_u16m8_tu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu( @@ -632,8 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m8_tu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( @@ -641,8 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2_tu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( @@ -650,8 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2_tu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2_tu(maskedoff, op1, op2, mask, 
vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu( @@ -659,8 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmerge_vvm_u32m1_tu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu( @@ -668,8 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m1_tu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu( @@ -677,8 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmerge_vvm_u32m2_tu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu( @@ -686,8 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m2_tu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu( @@ -695,8 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmerge_vvm_u32m4_tu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu( @@ -704,8 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
vmerge_vxm_u32m4_tu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu( @@ -713,8 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmerge_vvm_u32m8_tu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu( @@ -722,8 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m8_tu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu( @@ -731,8 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmerge_vvm_u64m1_tu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu( @@ -740,8 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m1_tu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu( @@ -749,8 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmerge_vvm_u64m2_tu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu( @@ -758,8 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m2_tu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu( @@ -767,8 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmerge_vvm_u64m4_tu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu( @@ -776,8 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m4_tu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu( @@ -785,8 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmerge_vvm_u64m8_tu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu( @@ -794,8 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m8_tu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta( @@ -803,8 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmerge_vvm_i8mf8_ta(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta( @@ 
-812,8 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf8_ta(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_ta(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta( @@ -821,8 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmerge_vvm_i8mf4_ta(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta( @@ -830,8 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf4_ta(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_ta(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta( @@ -839,8 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmerge_vvm_i8mf2_ta(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta( @@ -848,8 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8mf2_ta(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2_ta(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta( @@ -857,8 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1_ta(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmerge_vvm_i8m1_ta(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1_ta(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta( @@ -866,8 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1_ta(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m1_ta(mask, op1, op2, 
vl); +vint8m1_t test_vmerge_vxm_i8m1_ta(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta( @@ -875,8 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2_ta(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmerge_vvm_i8m2_ta(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2_ta(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta( @@ -884,8 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2_ta(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m2_ta(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2_ta(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta( @@ -893,8 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4_ta(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmerge_vvm_i8m4_ta(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4_ta(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta( @@ -902,8 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4_ta(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m4_ta(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4_ta(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta( @@ -911,8 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8_ta(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmerge_vvm_i8m8_ta(mask, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8_ta(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta( @@ -920,8 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8_ta(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmerge_vxm_i8m8_ta(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8_ta(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta( @@ -929,8 +929,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmerge_vvm_i16mf4_ta(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta( @@ -938,8 +938,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16mf4_ta(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4_ta(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta( @@ -947,8 +947,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmerge_vvm_i16mf2_ta(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta( @@ -956,8 +956,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16mf2_ta(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2_ta(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta( @@ -965,8 +965,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1_ta(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmerge_vvm_i16m1_ta(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1_ta(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta( @@ -974,8 +974,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1_ta(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m1_ta(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1_ta(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta( @@ -983,8 +983,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2_ta(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmerge_vvm_i16m2_ta(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2_ta(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) 
{ + return vmerge_vvm_i16m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta( @@ -992,8 +992,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2_ta(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m2_ta(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2_ta(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta( @@ -1001,8 +1001,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4_ta(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmerge_vvm_i16m4_ta(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4_ta(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta( @@ -1010,8 +1010,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4_ta(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m4_ta(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4_ta(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta( @@ -1019,8 +1019,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8_ta(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmerge_vvm_i16m8_ta(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8_ta(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta( @@ -1028,8 +1028,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8_ta(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmerge_vxm_i16m8_ta(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8_ta(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( @@ -1037,8 +1037,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( @@ -1046,8 +1046,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32mf2_ta(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta( @@ -1055,8 +1055,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1_ta(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmerge_vvm_i32m1_ta(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_ta(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta( @@ -1064,8 +1064,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1_ta(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m1_ta(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_ta(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta( @@ -1073,8 +1073,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2_ta(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmerge_vvm_i32m2_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_ta(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta( @@ -1082,8 +1082,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2_ta(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m2_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_ta(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta( @@ -1091,8 +1091,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4_ta(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmerge_vvm_i32m4_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_ta(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta( @@ -1100,8 +1100,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4_ta(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m4_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_ta(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + 
return vmerge_vxm_i32m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta( @@ -1109,8 +1109,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8_ta(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmerge_vvm_i32m8_ta(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_ta(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta( @@ -1118,8 +1118,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8_ta(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32m8_ta(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8_ta(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta( @@ -1127,8 +1127,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1_ta(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmerge_vvm_i64m1_ta(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1_ta(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta( @@ -1136,8 +1136,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1_ta(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m1_ta(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1_ta(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta( @@ -1145,8 +1145,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2_ta(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmerge_vvm_i64m2_ta(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2_ta(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta( @@ -1154,8 +1154,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2_ta(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m2_ta(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2_ta(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta( @@ -1163,8 +1163,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4_ta(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmerge_vvm_i64m4_ta(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4_ta(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta( @@ -1172,8 +1172,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4_ta(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m4_ta(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4_ta(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta( @@ -1181,8 +1181,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8_ta(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmerge_vvm_i64m8_ta(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8_ta(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta( @@ -1190,8 +1190,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8_ta(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmerge_vxm_i64m8_ta(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8_ta(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta( @@ -1199,8 +1199,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmerge_vvm_u8mf8_ta(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta( @@ -1208,8 +1208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf8_ta(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta( @@ -1217,8 +1217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmerge_vvm_u8mf4_ta(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return 
vmerge_vvm_u8mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta( @@ -1226,8 +1226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf4_ta(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta( @@ -1235,8 +1235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmerge_vvm_u8mf2_ta(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta( @@ -1244,8 +1244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8mf2_ta(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta( @@ -1253,8 +1253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmerge_vvm_u8m1_ta(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta( @@ -1262,8 +1262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m1_ta(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1_ta(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta( @@ -1271,8 +1271,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmerge_vvm_u8m2_ta(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u8m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta( @@ -1280,8 +1280,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vmerge_vxm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m2_ta(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2_ta(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u8m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta( @@ -1289,8 +1289,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmerge_vvm_u8m4_ta(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u8m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta( @@ -1298,8 +1298,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m4_ta(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4_ta(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u8m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta( @@ -1307,8 +1307,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmerge_vvm_u8m8_ta(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_u8m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta( @@ -1316,8 +1316,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmerge_vxm_u8m8_ta(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8_ta(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta( @@ -1325,8 +1325,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmerge_vvm_u16mf4_ta(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta( @@ -1334,8 +1334,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf4_ta(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4_ta(op1, op2, mask, vl); } // 
CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta( @@ -1343,8 +1343,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmerge_vvm_u16mf2_ta(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta( @@ -1352,8 +1352,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf2_ta(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta( @@ -1361,8 +1361,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmerge_vvm_u16m1_ta(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta( @@ -1370,8 +1370,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m1_ta(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1_ta(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta( @@ -1379,8 +1379,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmerge_vvm_u16m2_ta(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta( @@ -1388,8 +1388,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m2_ta(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2_ta(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta( @@ -1397,8 +1397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmerge_vvm_u16m4_ta(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta( @@ -1406,8 +1406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m4_ta(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4_ta(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta( @@ -1415,8 +1415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmerge_vvm_u16m8_ta(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta( @@ -1424,8 +1424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmerge_vxm_u16m8_ta(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8_ta(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( @@ -1433,8 +1433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2_ta(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( @@ -1442,8 +1442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2_ta(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta( @@ -1451,8 +1451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmerge_vvm_u32m1_ta(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1_ta(vuint32m1_t op1, 
vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u32m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta( @@ -1460,8 +1460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m1_ta(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1_ta(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta( @@ -1469,8 +1469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmerge_vvm_u32m2_ta(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta( @@ -1478,8 +1478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m2_ta(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2_ta(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta( @@ -1487,8 +1487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmerge_vvm_u32m4_ta(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta( @@ -1496,8 +1496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m4_ta(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4_ta(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta( @@ -1505,8 +1505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmerge_vvm_u32m8_ta(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta( @@ -1514,8 +1514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32m8_ta(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8_ta(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta( @@ -1523,8 +1523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmerge_vvm_u64m1_ta(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta( @@ -1532,8 +1532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m1_ta(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1_ta(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta( @@ -1541,8 +1541,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmerge_vvm_u64m2_ta(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta( @@ -1550,8 +1550,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m2_ta(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2_ta(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta( @@ -1559,8 +1559,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmerge_vvm_u64m4_ta(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta( @@ -1568,8 +1568,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m4_ta(mask, op1, op2, vl); 
+vuint64m4_t test_vmerge_vxm_u64m4_ta(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u64m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta( @@ -1577,8 +1577,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmerge_vvm_u64m8_ta(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta( @@ -1586,8 +1586,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmerge_vxm_u64m8_ta(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8_ta(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu( @@ -1595,8 +1595,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_vvm_f16mf4_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu( @@ -1604,8 +1604,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_vvm_f16mf2_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu( @@ -1613,8 +1613,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmerge_vvm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_vvm_f16m1_tu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu( @@ -1622,8 +1622,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) { - return vmerge_vvm_f16m2_tu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu( @@ -1631,8 +1631,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_vvm_f16m4_tu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu( @@ -1640,8 +1640,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_vvm_f16m8_tu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_f16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( @@ -1649,8 +1649,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2_tu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu( @@ -1658,8 +1658,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmerge_vvm_f32m1_tu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu( @@ -1667,8 +1667,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmerge_vvm_f32m2_tu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( @@ 
-1676,8 +1676,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmerge_vvm_f32m4_tu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( @@ -1685,8 +1685,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmerge_vvm_f32m8_tu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( @@ -1694,8 +1694,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmerge_vvm_f64m1_tu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( @@ -1703,8 +1703,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmerge_vvm_f64m2_tu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( @@ -1712,8 +1712,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmerge_vvm_f64m4_tu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( @@ -1721,8 +1721,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return 
vmerge_vvm_f64m8_tu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta( @@ -1730,8 +1730,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_vvm_f16mf4_ta(mask, op1, op2, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta( @@ -1739,8 +1739,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_vvm_f16mf2_ta(mask, op1, op2, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta( @@ -1748,8 +1748,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmerge_vvm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_vvm_f16m1_ta(mask, op1, op2, vl); +vfloat16m1_t test_vmerge_vvm_f16m1_ta(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta( @@ -1757,8 +1757,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge_vvm_f16m2_ta(mask, op1, op2, vl); +vfloat16m2_t test_vmerge_vvm_f16m2_ta(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta( @@ -1766,8 +1766,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_vvm_f16m4_ta(mask, op1, op2, vl); +vfloat16m4_t test_vmerge_vvm_f16m4_ta(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta( @@ -1775,8 +1775,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_vvm_f16m8_ta(mask, op1, op2, vl); +vfloat16m8_t test_vmerge_vvm_f16m8_ta(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t 
mask, size_t vl) { + return vmerge_vvm_f16m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( @@ -1784,8 +1784,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2_ta(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta( @@ -1793,8 +1793,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmerge_vvm_f32m1_ta(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1_ta(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta( @@ -1802,8 +1802,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmerge_vvm_f32m2_ta(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2_ta(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta( @@ -1811,8 +1811,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmerge_vvm_f32m4_ta(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4_ta(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta( @@ -1820,8 +1820,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmerge_vvm_f32m8_ta(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8_ta(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta( @@ -1829,8 +1829,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmerge_vvm_f64m1_ta(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1_ta(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta( @@ -1838,8 +1838,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmerge_vvm_f64m2_ta(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2_ta(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta( @@ -1847,8 +1847,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmerge_vvm_f64m4_ta(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4_ta(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta( @@ -1856,7 +1856,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmerge_vvm_f64m8_ta(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8_ta(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8_ta(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // 
CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t 
test_vcompress_vm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, 
vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) { - return 
vcompress_tu(mask, maskedoff, src, vl); +vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) { + return 
vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcompress.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_tu( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t 
test_vcompress_vm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); 
+vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( @@ -497,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( @@ -533,8 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vcompress_tu(mask, maskedoff, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta( @@ -542,8 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8_ta(vbool64_t mask, vint8mf8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8_ta(vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta( @@ -551,8 +551,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4_ta(vbool32_t mask, vint8mf4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4_ta(vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta( @@ -560,8 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2_ta(vbool16_t mask, vint8mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2_ta(vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta( @@ -569,8 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1_ta(vbool8_t mask, vint8m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8m1_t test_vcompress_vm_i8m1_ta(vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta( @@ -578,8 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2_ta(vbool4_t mask, vint8m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8m2_t test_vcompress_vm_i8m2_ta(vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta( @@ -587,8 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4_ta(vbool2_t mask, vint8m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8m4_t test_vcompress_vm_i8m4_ta(vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta( @@ -596,8 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8_ta(vbool1_t mask, vint8m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint8m8_t test_vcompress_vm_i8m8_ta(vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta( @@ -605,8 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4_ta(vbool64_t mask, vint16mf4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4_ta(vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta( @@ -614,8 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2_ta(vbool32_t mask, vint16mf2_t src, size_t 
vl) { - return vcompress_ta(mask, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2_ta(vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta( @@ -623,8 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1_ta(vbool16_t mask, vint16m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint16m1_t test_vcompress_vm_i16m1_ta(vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta( @@ -632,8 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2_ta(vbool8_t mask, vint16m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint16m2_t test_vcompress_vm_i16m2_ta(vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta( @@ -641,8 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4_ta(vbool4_t mask, vint16m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint16m4_t test_vcompress_vm_i16m4_ta(vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta( @@ -650,8 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8_ta(vbool2_t mask, vint16m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint16m8_t test_vcompress_vm_i16m8_ta(vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( @@ -659,8 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_ta(vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta( @@ -668,8 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1_ta(vbool32_t mask, vint32m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint32m1_t test_vcompress_vm_i32m1_ta(vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta( @@ -677,8 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2_ta(vbool16_t mask, vint32m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint32m2_t test_vcompress_vm_i32m2_ta(vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: 
@test_vcompress_vm_i32m4_ta( @@ -686,8 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4_ta(vbool8_t mask, vint32m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint32m4_t test_vcompress_vm_i32m4_ta(vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta( @@ -695,8 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8_ta(vbool4_t mask, vint32m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint32m8_t test_vcompress_vm_i32m8_ta(vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta( @@ -704,8 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1_ta(vbool64_t mask, vint64m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint64m1_t test_vcompress_vm_i64m1_ta(vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta( @@ -713,8 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2_ta(vbool32_t mask, vint64m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint64m2_t test_vcompress_vm_i64m2_ta(vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta( @@ -722,8 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4_ta(vbool16_t mask, vint64m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint64m4_t test_vcompress_vm_i64m4_ta(vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta( @@ -731,8 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8_ta(vbool8_t mask, vint64m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vint64m8_t test_vcompress_vm_i64m8_ta(vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta( @@ -740,8 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8_ta(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8_ta(vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta( @@ -749,8 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8mf4_t test_vcompress_vm_u8mf4_ta(vbool32_t mask, vuint8mf4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4_ta(vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta( @@ -758,8 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2_ta(vbool16_t mask, vuint8mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2_ta(vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta( @@ -767,8 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1_ta(vbool8_t mask, vuint8m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8m1_t test_vcompress_vm_u8m1_ta(vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta( @@ -776,8 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2_ta(vbool4_t mask, vuint8m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8m2_t test_vcompress_vm_u8m2_ta(vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta( @@ -785,8 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4_ta(vbool2_t mask, vuint8m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8m4_t test_vcompress_vm_u8m4_ta(vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta( @@ -794,8 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8_ta(vbool1_t mask, vuint8m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint8m8_t test_vcompress_vm_u8m8_ta(vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta( @@ -803,8 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4_ta(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4_ta(vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta( @@ -812,8 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2_ta(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2_ta(vuint16mf2_t src, vbool32_t mask, size_t 
vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta( @@ -821,8 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1_ta(vbool16_t mask, vuint16m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint16m1_t test_vcompress_vm_u16m1_ta(vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta( @@ -830,8 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2_ta(vbool8_t mask, vuint16m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint16m2_t test_vcompress_vm_u16m2_ta(vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta( @@ -839,8 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4_ta(vbool4_t mask, vuint16m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint16m4_t test_vcompress_vm_u16m4_ta(vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta( @@ -848,8 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8_ta(vbool2_t mask, vuint16m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint16m8_t test_vcompress_vm_u16m8_ta(vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( @@ -857,8 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_ta(vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta( @@ -866,8 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1_ta(vbool32_t mask, vuint32m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_ta(vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta( @@ -875,8 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2_ta(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_ta(vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta( @@ -884,8 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4_ta(vbool8_t mask, vuint32m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_ta(vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta( @@ -893,8 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8_ta(vbool4_t mask, vuint32m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m8_t test_vcompress_vm_u32m8_ta(vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta( @@ -902,8 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1_ta(vbool64_t mask, vuint64m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint64m1_t test_vcompress_vm_u64m1_ta(vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta( @@ -911,8 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2_ta(vbool32_t mask, vuint64m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint64m2_t test_vcompress_vm_u64m2_ta(vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta( @@ -920,8 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4_ta(vbool16_t mask, vuint64m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint64m4_t test_vcompress_vm_u64m4_ta(vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta( @@ -929,8 +929,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8_ta(vbool8_t mask, vuint64m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint64m8_t test_vcompress_vm_u64m8_ta(vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta( @@ -938,8 +938,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta( @@ -947,8 +947,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t 
test_vcompress_vm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta( @@ -956,8 +956,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1_ta(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat16m1_t test_vcompress_vm_f16m1_ta(vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta( @@ -965,8 +965,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vcompress_vm_f16m2_ta(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat16m2_t test_vcompress_vm_f16m2_ta(vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta( @@ -974,8 +974,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4_ta(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat16m4_t test_vcompress_vm_f16m4_ta(vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta( @@ -983,8 +983,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8_ta(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat16m8_t test_vcompress_vm_f16m8_ta(vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( @@ -992,8 +992,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta( @@ -1001,8 +1001,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1_ta(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1_ta(vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta( @@ -1010,8 +1010,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2_ta(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat32m2_t 
test_vcompress_vm_f32m2_ta(vfloat32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta(
@@ -1019,8 +1019,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vcompress_vm_f32m4_ta(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4_ta(vfloat32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta(
@@ -1028,8 +1028,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vcompress_vm_f32m8_ta(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8_ta(vfloat32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta(
@@ -1037,8 +1037,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vcompress_vm_f64m1_ta(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1_ta(vfloat64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta(
@@ -1046,8 +1046,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vcompress_vm_f64m2_ta(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2_ta(vfloat64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta(
@@ -1055,8 +1055,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vcompress_vm_f64m4_ta(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4_ta(vfloat64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta(
@@ -1064,7 +1064,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vcompress_vm_f64m8_ta(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_ta(vfloat64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
@@ -11,8 +11,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu( @@ 
-65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, vbool64_t 
mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_ta( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_ta( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_ta( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // 
CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_ta( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_ta( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_ta( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_ta( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_ta( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmerge_ta(mask, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_ta( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) 
{
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_ta(
@@ -236,8 +236,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_ta(
@@ -245,8 +245,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_ta(
@@ -254,8 +254,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_ta(
@@ -263,8 +263,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_ta(
@@ -272,7 +272,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
@@ -11,8 +11,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmerge_vvm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmerge_tu(mask,
maskedoff, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return 
vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); 
+vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, 
size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8_tu(vbool2_t mask, vint16m8_t 
maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m2_t test_vmerge_vvm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu( @@ -398,8 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu( @@ -407,8 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu( @@ -416,8 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu( @@ -425,8 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu( @@ -434,8 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu( @@ -443,8 +443,8 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu( @@ -452,8 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu( @@ -461,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu( @@ -470,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu( @@ -479,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu( @@ -488,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu( @@ -497,8 
+497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu( @@ -506,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu( @@ -515,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu( @@ -524,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu( @@ -533,8 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu( @@ -542,8 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } 
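Every hunk in this test file makes the same mechanical change: for the policy variants of vmerge, the mask argument moves from the first C parameter to the position just before vl, so the builtin's argument order now mirrors the operand order of the @llvm.riscv.vmerge call in the CHECK lines (maskedoff/passthru, op1, op2, mask, vl), and the tail-agnostic variants pass poison as the passthru. A minimal usage sketch under that reading (not taken from the diff; the demo_* names are hypothetical and only restate what the updated tests already exercise):

  #include <stddef.h>
  #include <riscv_vector.h>

  // Tail-undisturbed form: maskedoff comes first, mask sits just before vl.
  vint8mf8_t demo_merge_tu(vint8mf8_t maskedoff, vint8mf8_t op1,
                           vint8mf8_t op2, vbool64_t mask, size_t vl) {
    return vmerge_tu(maskedoff, op1, op2, mask, vl);
  }

  // Tail-agnostic form: no maskedoff operand; the passthru becomes poison.
  vint8mf8_t demo_merge_ta(vint8mf8_t op1, vint8mf8_t op2,
                           vbool64_t mask, size_t vl) {
    return vmerge_ta(op1, op2, mask, vl);
  }
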
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu( @@ -551,8 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu( @@ -560,8 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu( @@ -569,8 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu( @@ -578,8 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu( @@ -587,8 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu( @@ -596,8 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, 
vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu( @@ -605,8 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu( @@ -614,8 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu( @@ -623,8 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu( @@ -632,8 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( @@ -641,8 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( @@ -650,8 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - 
return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu( @@ -659,8 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu( @@ -668,8 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu( @@ -677,8 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu( @@ -686,8 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu( @@ -695,8 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu( @@ -704,8 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t 
test_vmerge_vxm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu( @@ -713,8 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu( @@ -722,8 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu( @@ -731,8 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu( @@ -740,8 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu( @@ -749,8 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu( @@ -758,8 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu( @@ -767,8 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu( @@ -776,8 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu( @@ -785,8 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu( @@ -794,8 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta( @@ -803,8 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta( @@ -812,8 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_ta(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta( @@ -821,8 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta( @@ -830,8 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_ta(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta( @@ -839,8 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta( @@ -848,8 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2_ta(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta( @@ -857,8 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1_ta(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1_ta(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta( @@ -866,8 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1_ta(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1_ta(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta( @@ -875,8 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2_ta(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2_ta(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta( @@ -884,8 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2_ta(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2_ta(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta( @@ -893,8 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4_ta(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4_ta(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta( @@ -902,8 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4_ta(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4_ta(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta( @@ -911,8 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8_ta(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8_ta(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta( @@ -920,8 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8_ta(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8_ta(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta( @@ -929,8 +929,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta( @@ -938,8 
+938,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4_ta(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta( @@ -947,8 +947,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta( @@ -956,8 +956,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2_ta(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta( @@ -965,8 +965,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1_ta(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1_ta(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta( @@ -974,8 +974,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1_ta(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1_ta(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta( @@ -983,8 +983,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2_ta(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2_ta(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta( @@ -992,8 +992,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2_ta(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2_ta(vint16m2_t op1, int16_t op2, 
vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta( @@ -1001,8 +1001,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4_ta(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4_ta(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta( @@ -1010,8 +1010,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4_ta(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4_ta(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta( @@ -1019,8 +1019,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8_ta(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8_ta(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta( @@ -1028,8 +1028,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8_ta(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8_ta(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( @@ -1037,8 +1037,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( @@ -1046,8 +1046,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta( @@ -1055,8 +1055,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1_ta(vbool32_t mask, vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_ta(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta( @@ -1064,8 +1064,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1_ta(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_ta(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta( @@ -1073,8 +1073,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2_ta(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_ta(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta( @@ -1082,8 +1082,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2_ta(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_ta(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta( @@ -1091,8 +1091,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4_ta(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_ta(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta( @@ -1100,8 +1100,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4_ta(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_ta(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta( @@ -1109,8 +1109,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8_ta(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_ta(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta( @@ -1118,8 +1118,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8_ta(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8_ta(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta( @@ -1127,8 +1127,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1_ta(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1_ta(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta( @@ -1136,8 +1136,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1_ta(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1_ta(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta( @@ -1145,8 +1145,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2_ta(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2_ta(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta( @@ -1154,8 +1154,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2_ta(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2_ta(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta( @@ -1163,8 +1163,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4_ta(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4_ta(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta( @@ -1172,8 +1172,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4_ta(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4_ta(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta( @@ -1181,8 +1181,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8_ta(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8_ta(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta( @@ -1190,8 +1190,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8_ta(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8_ta(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta( @@ -1199,8 +1199,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta( @@ -1208,8 +1208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta( @@ -1217,8 +1217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta( @@ -1226,8 +1226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta( @@ -1235,8 +1235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t 
mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta( @@ -1244,8 +1244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta( @@ -1253,8 +1253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta( @@ -1262,8 +1262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1_ta(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta( @@ -1271,8 +1271,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta( @@ -1280,8 +1280,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2_ta(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta( @@ -1289,8 +1289,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta( @@ -1298,8 +1298,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, 
vl); +vuint8m4_t test_vmerge_vxm_u8m4_ta(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta( @@ -1307,8 +1307,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta( @@ -1316,8 +1316,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8_ta(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta( @@ -1325,8 +1325,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta( @@ -1334,8 +1334,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta( @@ -1343,8 +1343,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta( @@ -1352,8 +1352,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta( @@ -1361,8 +1361,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta( @@ -1370,8 +1370,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1_ta(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta( @@ -1379,8 +1379,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta( @@ -1388,8 +1388,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2_ta(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta( @@ -1397,8 +1397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta( @@ -1406,8 +1406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4_ta(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta( @@ -1415,8 +1415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta( @@ 
-1424,8 +1424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8_ta(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( @@ -1433,8 +1433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( @@ -1442,8 +1442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta( @@ -1451,8 +1451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1_ta(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta( @@ -1460,8 +1460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1_ta(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta( @@ -1469,8 +1469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta( @@ -1478,8 +1478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m2_t 
test_vmerge_vxm_u32m2_ta(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta( @@ -1487,8 +1487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta( @@ -1496,8 +1496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4_ta(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta( @@ -1505,8 +1505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta( @@ -1514,8 +1514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8_ta(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta( @@ -1523,8 +1523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta( @@ -1532,8 +1532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1_ta(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta( @@ -1541,8 +1541,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint64m2_t test_vmerge_vvm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta( @@ -1550,8 +1550,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2_ta(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta( @@ -1559,8 +1559,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta( @@ -1568,8 +1568,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4_ta(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta( @@ -1577,8 +1577,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta( @@ -1586,8 +1586,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8_ta(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu( @@ -1595,8 +1595,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, 
mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu( @@ -1604,8 +1604,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu( @@ -1613,8 +1613,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmerge_vvm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu( @@ -1622,8 +1622,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu( @@ -1631,8 +1631,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu( @@ -1640,8 +1640,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( @@ -1649,8 +1649,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_tu(mask, 
maskedoff, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu( @@ -1658,8 +1658,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu( @@ -1667,8 +1667,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( @@ -1676,8 +1676,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( @@ -1685,8 +1685,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( @@ -1694,8 +1694,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( @@ -1703,8 +1703,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( @@ -1712,8 +1712,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( @@ -1721,8 +1721,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmerge_tu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta( @@ -1730,8 +1730,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta( @@ -1739,8 +1739,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta( @@ -1748,8 +1748,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmerge_vvm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat16m1_t test_vmerge_vvm_f16m1_ta(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta( @@ -1757,8 +1757,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t 
test_vmerge_vvm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat16m2_t test_vmerge_vvm_f16m2_ta(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta( @@ -1766,8 +1766,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat16m4_t test_vmerge_vvm_f16m4_ta(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta( @@ -1775,8 +1775,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat16m8_t test_vmerge_vvm_f16m8_ta(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( @@ -1784,8 +1784,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta( @@ -1793,8 +1793,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1_ta(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta( @@ -1802,8 +1802,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2_ta(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta( @@ -1811,8 +1811,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4_ta(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vmerge_vvm_f32m8_ta( @@ -1820,8 +1820,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8_ta(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta( @@ -1829,8 +1829,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1_ta(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta( @@ -1838,8 +1838,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2_ta(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta( @@ -1847,8 +1847,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4_ta(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta( @@ -1856,7 +1856,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8_ta(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); }