diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -137,8 +137,6 @@
 def NonePolicy : PolicyScheme<0>;
 def HasPassthruOperand : PolicyScheme<1>;
 def HasPolicyOperand : PolicyScheme<2>;
-// Specail case for passthru operand which is not a first opeand.
-def HasPassthruOperandAtIdx1 : PolicyScheme<3>;
 
 class RVVBuiltin {
@@ -1831,20 +1829,13 @@
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
 let HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
-    MaskedPolicyScheme = NonePolicy,
-    ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-      // insert poison passthru
-      if (PolicyAttrs == TAIL_AGNOSTIC)
-        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
-      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
-    }] in {
+    UnMaskedPolicyScheme = HasPassthruOperand,
+    MaskedPolicyScheme = NonePolicy in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
-                                    [["vvm", "v", "vmvv"],
-                                     ["vxm", "v", "vmve"],
-                                     ["vvm", "Uv", "UvmUvUv"],
-                                     ["vxm", "Uv", "UvmUvUe"]]>;
+                                    [["vvm", "v", "vvvm"],
+                                     ["vxm", "v", "vvem"],
+                                     ["vvm", "Uv", "UvUvUvm"],
+                                     ["vxm", "Uv", "UvUvUem"]]>;
 }
 
 // 12.16. Vector Integer Move Instructions
@@ -1975,19 +1966,12 @@
 // 14.15. Vector Floating-Point Merge Instructio
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
 let HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
-    MaskedPolicyScheme = NonePolicy,
-    ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-      // insert poison passthru
-      if (PolicyAttrs == TAIL_AGNOSTIC)
-        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
-      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
-    }] in {
+    UnMaskedPolicyScheme = HasPassthruOperand,
+    MaskedPolicyScheme = NonePolicy in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
-                                    [["vvm", "v", "vmvv"]]>;
+                                    [["vvm", "v", "vvvm"]]>;
   defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
-                                     [["vfm", "v", "vmve"]]>;
+                                     [["vfm", "v", "vvem"]]>;
 }
 
 // 14.16. Vector Floating-Point Move Instruction
@@ -2175,21 +2159,14 @@
 // 17.5. Vector Compress Instruction
 let IsPrototypeDefaultTU = true,
     HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
-    MaskedPolicyScheme = NonePolicy,
-    ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-      // insert poison passthru
-      if (PolicyAttrs == TAIL_AGNOSTIC)
-        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
-      IntrinsicTypes = {ResultType, Ops.back()->getType()};
-    }] in {
+    UnMaskedPolicyScheme = HasPassthruOperand,
+    MaskedPolicyScheme = NonePolicy in {
   // signed and floating type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
-                                    [["vm", "v", "vmvv"]]>;
+                                    [["vm", "v", "vvvm"]]>;
   // unsigned type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
-                                    [["vm", "Uv", "UvmUvUv"]]>;
+                                    [["vm", "Uv", "UvUvUvm"]]>;
 }
 
 // Miscellaneous
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -366,9 +366,6 @@
   // Passthru operand is at first parameter in C builtin.
   HasPassthruOperand,
   HasPolicyOperand,
-  // Special case for vmerge, the passthru operand is second
-  // parameter in C builtin.
-  HasPassthruOperandAtIdx1,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -963,15 +963,6 @@
     else if (PolicyAttrs.isTAPolicy() && HasPassthruOp && IsPrototypeDefaultTU)
       NewPrototype.erase(NewPrototype.begin() + 1);
-    if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
-      if (PolicyAttrs.isTUPolicy() && !IsPrototypeDefaultTU) {
-        // Insert undisturbed output to index 1
-        NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
-      } else if (PolicyAttrs.isTAPolicy() && IsPrototypeDefaultTU) {
-        // Erase passthru for TA policy
-        NewPrototype.erase(NewPrototype.begin() + 2);
-      }
-    }
   } else if (PolicyAttrs.isTUPolicy() && HasPassthruOp) {
     // NF > 1 cases for segment load operations.
     // Convert
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -10,8 +11,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8 (vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
@@ -19,8 +20,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4 (vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
@@ -28,8 +29,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2 (vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
@@ -37,8 +38,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 //
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m1_t test_vcompress_vm_i8m1 (vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2( @@ -46,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m2_t test_vcompress_vm_i8m2 (vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( @@ -55,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m4_t test_vcompress_vm_i8m4 (vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( @@ -64,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m8_t test_vcompress_vm_i8m8 (vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( @@ -73,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4 (vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2( @@ -82,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2 (vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( @@ -91,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m1_t test_vcompress_vm_i16m1 (vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( @@ -100,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m2_t test_vcompress_vm_i16m2 (vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( @@ -109,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m4_t test_vcompress_vm_i16m4 (vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( @@ -118,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m8_t test_vcompress_vm_i16m8 (vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( @@ -127,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2 (vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( @@ -136,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m1_t test_vcompress_vm_i32m1 (vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( @@ -145,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m2_t test_vcompress_vm_i32m2 (vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( @@ -154,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m4_t test_vcompress_vm_i32m4 (vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( @@ -163,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m8_t test_vcompress_vm_i32m8 (vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( @@ -172,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m1_t test_vcompress_vm_i64m1 (vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( @@ -181,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m2_t test_vcompress_vm_i64m2 (vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( @@ -190,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m4_t test_vcompress_vm_i64m4 (vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( @@ -199,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m8_t test_vcompress_vm_i64m8 (vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( @@ -208,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8 (vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( @@ -217,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4 (vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: 
@test_vcompress_vm_u8mf2( @@ -226,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2 (vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( @@ -235,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m1_t test_vcompress_vm_u8m1 (vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( @@ -244,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m2_t test_vcompress_vm_u8m2 (vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( @@ -253,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m4_t test_vcompress_vm_u8m4 (vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( @@ -262,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m8_t test_vcompress_vm_u8m8 (vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( @@ -271,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4 (vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( @@ -280,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2 (vuint16mf2_t dest, vuint16mf2_t src, vbool32_t 
mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( @@ -289,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m1_t test_vcompress_vm_u16m1 (vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( @@ -298,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m2_t test_vcompress_vm_u16m2 (vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4( @@ -307,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m4_t test_vcompress_vm_u16m4 (vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( @@ -316,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m8_t test_vcompress_vm_u16m8 (vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( @@ -325,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2 (vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( @@ -334,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m1_t test_vcompress_vm_u32m1 (vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( @@ -343,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { - return 
vcompress(mask, dest, src, vl); +vuint32m2_t test_vcompress_vm_u32m2 (vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( @@ -352,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m4_t test_vcompress_vm_u32m4 (vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8( @@ -361,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m8_t test_vcompress_vm_u32m8 (vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( @@ -370,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m1_t test_vcompress_vm_u64m1 (vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( @@ -379,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m2_t test_vcompress_vm_u64m2 (vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( @@ -388,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m4_t test_vcompress_vm_u64m4 (vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( @@ -397,8 +398,62 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m8_t test_vcompress_vm_u64m8 (vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( @@ -406,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2 (vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1( @@ -415,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1 (vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( @@ -424,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, 
size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2 (vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( @@ -433,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m4_t test_vcompress_vm_f32m4 (vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( @@ -442,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8 (vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( @@ -451,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1 (vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( @@ -460,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2 (vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4( @@ -469,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4 (vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( @@ -478,8 +533,125 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8 (vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vcompress_vm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vcompress_vm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vcompress_vm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vcompress_vm_i8m1_tu (vint8m1_t merge, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vcompress_vm_i8m2_tu (vint8m2_t merge, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vcompress_vm_i8m4_tu (vint8m4_t merge, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vcompress_vm_i8m8_tu (vint8m8_t merge, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vcompress_vm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vcompress_vm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vcompress_vm_i16m1_tu (vint16m1_t merge, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vcompress_vm_i16m2_tu (vint16m2_t merge, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vcompress_vm_i16m4_tu (vint16m4_t merge, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vcompress_vm_i16m8_tu (vint16m8_t merge, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu( @@ -487,8 +659,197 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) { - return vcompress_tu(mask, merge, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vcompress_vm_i32m1_tu (vint32m1_t merge, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vcompress_vm_i32m2_tu (vint32m2_t merge, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vcompress_vm_i32m4_tu (vint32m4_t merge, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t 
test_vcompress_vm_i32m8_tu (vint32m8_t merge, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vcompress_vm_i64m1_tu (vint64m1_t merge, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vcompress_vm_i64m2_tu (vint64m2_t merge, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vcompress_vm_i64m4_tu (vint64m4_t merge, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vcompress_vm_i64m8_tu (vint64m8_t merge, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcompress_vm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcompress_vm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcompress_vm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcompress_vm_u8m1_tu (vuint8m1_t merge, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcompress_vm_u8m2_tu (vuint8m2_t merge, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcompress_vm_u8m4_tu (vuint8m4_t merge, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcompress_vm_u8m8_tu (vuint8m8_t merge, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcompress_vm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcompress_vm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcompress_vm_u16m1_tu (vuint16m1_t merge, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcompress_vm_u16m2_tu (vuint16m2_t merge, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcompress_vm_u16m4_tu (vuint16m4_t merge, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcompress_vm_u16m8_tu (vuint16m8_t merge, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu( @@ -496,42 +857,744 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) { - return vcompress_tu(mask, merge, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) { - return vcompress_tu(mask, merge, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_tu (vuint32m1_t merge, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_tu (vuint32m2_t merge, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_tu (vuint32m4_t merge, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcompress_vm_u32m8_tu (vuint32m8_t merge, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcompress_vm_u64m1_tu (vuint64m1_t merge, vuint64m1_t src, vbool64_t mask, size_t vl) { + 
return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcompress_vm_u64m2_tu (vuint64m2_t merge, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcompress_vm_u64m4_tu (vuint64m4_t merge, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcompress_vm_u64m8_tu (vuint64m8_t merge, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vcompress_vm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vcompress_vm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vcompress_vm_i8mf8_ta (vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vcompress_vm_i8mf4_ta (vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vcompress_vm_i8mf2_ta (vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vcompress_vm_i8m1_ta (vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vcompress_vm_i8m2_ta (vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vcompress_vm_i8m4_ta (vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vcompress_vm_i8m8_ta (vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vcompress_vm_i16mf4_ta (vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vcompress_vm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vcompress_vm_i16mf2_ta (vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vcompress_vm_i16m1_ta (vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vcompress_vm_i16m2_ta (vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vcompress_vm_i16m4_ta (vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vcompress_vm_i16m8_ta (vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vcompress_vm_i32mf2_ta (vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vcompress_vm_i32m1_ta (vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vcompress_vm_i32m2_ta (vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vcompress_vm_i32m4_ta (vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint32m8_t test_vcompress_vm_i32m8_ta (vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vcompress_vm_i64m1_ta (vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vcompress_vm_i64m2_ta (vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vcompress_vm_i64m4_ta (vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vcompress_vm_i64m8_ta (vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcompress_vm_u8mf8_ta (vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcompress_vm_u8mf4_ta (vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcompress_vm_u8mf2_ta (vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcompress_vm_u8m1_ta (vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcompress_vm_u8m2_ta (vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcompress_vm_u8m4_ta (vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcompress_vm_u8m8_ta (vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcompress_vm_u16mf4_ta (vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcompress_vm_u16mf2_ta (vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcompress_vm_u16m1_ta (vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcompress_vm_u16m2_ta (vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcompress_vm_u16m4_ta (vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcompress_vm_u16m8_ta (vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcompress_vm_u32mf2_ta (vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vcompress_vm_u32m1_ta (vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcompress_vm_u32m2_ta (vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcompress_vm_u32m4_ta (vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcompress_vm_u32m8_ta (vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcompress_vm_u64m1_ta (vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcompress_vm_u64m2_ta (vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcompress_vm_u64m4_ta (vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcompress_vm_u64m8_ta (vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4_ta (vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2_ta (vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1_ta (vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2_ta (vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_ta (vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_ta (vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vcompress_vm_f32mf2_ta (vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_ta (vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_ta (vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_ta (vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vcompress_vm_f32m8_ta (vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_ta (vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_ta (vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_ta (vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vcompress_vm_f64m8_ta (vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_ta(src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c @@ -1,978 +1,1170 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t 
op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, - vint16mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, - vint16mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2 (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4 
(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, - vint32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2 (vint64m2_t op1, int64_t 
op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4 (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, - vuint8mf8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, - vuint8mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, - vuint8mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // 
CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - vuint16mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - vuint16mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, - vuint16m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, - vuint16m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t 
test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, - vuint16m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4 (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, - vuint16m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - vuint32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, 
vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, - vuint32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, - vuint32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, - vuint32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, - vuint32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, - vuint64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, - vuint64m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, - vuint64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, - vuint64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, 
vl);
+vuint64m8_t test_vmerge_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vmerge_vvm_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vmerge_vvm_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vmerge_vvm_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vmerge_vvm_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vmerge_vvm_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vmerge_vvm_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
-                                     vfloat32mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]],
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, - vfloat32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, - vfloat32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, - vfloat32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, - vfloat32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, - vfloat64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, - vfloat64m2_t 
op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, - vfloat64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, - vfloat64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vvm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vxm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vvm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vxm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8mf2_t test_vmerge_vvm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vxm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vvm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vxm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vvm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vxm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vvm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vxm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vvm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return 
vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vxm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vvm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vxm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vvm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vxm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vvm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vxm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vvm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmerge_vxm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vxm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vvm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vxm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vvm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vxm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -980,8 +1172,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -989,78 +1181,1608 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( 
[[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vxm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vvm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vxm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vvm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vxm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vvm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vxm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vvm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vxm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vvm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vxm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vvm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vxm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vvm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vxm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vvm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vxm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vvm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vxm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vvm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vxm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vvm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vxm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vvm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vxm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vvm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vxm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vmerge_vvm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vxm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vvm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vxm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vvm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vxm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vvm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vxm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vvm_u32mf2_tu 
(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vxm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vvm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vxm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vvm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vxm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vvm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vxm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vvm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, 
vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vxm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vvm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vxm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vvm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vxm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vvm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vxm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vvm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + 
return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vxm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vvm_i8mf8_ta (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vxm_i8mf8_ta (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vvm_i8mf4_ta (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vxm_i8mf4_ta (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vvm_i8mf2_ta (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vxm_i8mf2_ta (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vvm_i8m1_ta (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vmerge_vxm_i8m1_ta (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vvm_i8m2_ta (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vxm_i8m2_ta (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vvm_i8m4_ta (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vxm_i8m4_ta (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vvm_i8m8_ta (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vxm_i8m8_ta (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vvm_i16mf4_ta (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vxm_i16mf4_ta (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16mf2_t test_vmerge_vvm_i16mf2_ta (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vxm_i16mf2_ta (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vvm_i16m1_ta (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vxm_i16m1_ta (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vvm_i16m2_ta (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vxm_i16m2_ta (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vvm_i16m4_ta (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vxm_i16m4_ta (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vvm_i16m8_ta (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], 
i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vxm_i16m8_ta (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmerge_vvm_i32mf2_ta (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmerge_vxm_i32mf2_ta (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmerge_vvm_i32m1_ta (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmerge_vxm_i32m1_ta (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmerge_vvm_i32m2_ta (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmerge_vxm_i32m2_ta (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmerge_vvm_i32m4_ta (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmerge_vxm_i32m4_ta (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vvm_i32m8_ta (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vxm_i32m8_ta (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vvm_i64m1_ta (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vxm_i64m1_ta (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vvm_i64m2_ta (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vxm_i64m2_ta (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vvm_i64m4_ta (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vxm_i64m4_ta (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vvm_i64m8_ta (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vxm_i64m8_ta (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vvm_u8mf8_ta (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vxm_u8mf8_ta (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vvm_u8mf4_ta (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vxm_u8mf4_ta (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vvm_u8mf2_ta (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vxm_u8mf2_ta (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vvm_u8m1_ta (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vxm_u8m1_ta (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vvm_u8m2_ta (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vxm_u8m2_ta (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vvm_u8m4_ta (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vxm_u8m4_ta (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vvm_u8m8_ta (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vxm_u8m8_ta (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vvm_u16mf4_ta (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vxm_u16mf4_ta (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vvm_u16mf2_ta (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, 
size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vxm_u16mf2_ta (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vvm_u16m1_ta (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vxm_u16m1_ta (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vvm_u16m2_ta (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vxm_u16m2_ta (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vvm_u16m4_ta (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vxm_u16m4_ta (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vvm_u16m8_ta (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16m8_t test_vmerge_vxm_u16m8_ta (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vvm_u32mf2_ta (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vxm_u32mf2_ta (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vvm_u32m1_ta (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vxm_u32m1_ta (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vvm_u32m2_ta (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vxm_u32m2_ta (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vvm_u32m4_ta (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vxm_u32m4_ta (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vvm_u32m8_ta (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vxm_u32m8_ta (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vvm_u64m1_ta (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vxm_u64m1_ta (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vvm_u64m2_ta (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vxm_u64m2_ta (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vvm_u64m4_ta (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vxm_u64m4_ta (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vvm_u64m8_ta (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vxm_u64m8_ta (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmerge_vvm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmerge_vvm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmerge_vvm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmerge_vvm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmerge_vvm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmerge_vvm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmerge_vvm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmerge_vvm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmerge_vvm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_ta (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_ta (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1_ta (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2_ta (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4_ta (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8_ta (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmerge_vvm_f32mf2_ta (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmerge_vvm_f32m1_ta (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmerge_vvm_f32m2_ta (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t 
mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmerge_vvm_f32m4_ta (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmerge_vvm_f32m8_ta (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmerge_vvm_f64m1_ta (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmerge_vvm_f64m2_ta (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmerge_vvm_f64m4_ta (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8_ta (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c @@ -11,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) { - return vcompress_vm_i8mf8(mask, dest, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8 (vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i8mf8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4( @@ -20,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) { - return vcompress_vm_i8mf4(mask, dest, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4 (vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i8mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2( @@ -29,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) { - return vcompress_vm_i8mf2(mask, dest, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2 (vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i8mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1( @@ -38,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { - return vcompress_vm_i8m1(mask, dest, src, vl); +vint8m1_t test_vcompress_vm_i8m1 (vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i8m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2( @@ -47,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { - return vcompress_vm_i8m2(mask, dest, src, vl); +vint8m2_t test_vcompress_vm_i8m2 (vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( @@ -56,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { - return vcompress_vm_i8m4(mask, dest, src, vl); +vint8m4_t test_vcompress_vm_i8m4 (vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( @@ -65,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { - return vcompress_vm_i8m8(mask, dest, src, vl); +vint8m8_t test_vcompress_vm_i8m8 (vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( @@ -74,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { - return vcompress_vm_i16mf4(mask, dest, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4 (vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: 
@test_vcompress_vm_i16mf2( @@ -83,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { - return vcompress_vm_i16mf2(mask, dest, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2 (vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( @@ -92,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { - return vcompress_vm_i16m1(mask, dest, src, vl); +vint16m1_t test_vcompress_vm_i16m1 (vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( @@ -101,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { - return vcompress_vm_i16m2(mask, dest, src, vl); +vint16m2_t test_vcompress_vm_i16m2 (vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i16m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( @@ -110,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { - return vcompress_vm_i16m4(mask, dest, src, vl); +vint16m4_t test_vcompress_vm_i16m4 (vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( @@ -119,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { - return vcompress_vm_i16m8(mask, dest, src, vl); +vint16m8_t test_vcompress_vm_i16m8 (vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( @@ -128,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { - return vcompress_vm_i32mf2(mask, dest, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2 (vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i32mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( @@ -137,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { - return 
vcompress_vm_i32m1(mask, dest, src, vl); +vint32m1_t test_vcompress_vm_i32m1 (vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( @@ -146,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { - return vcompress_vm_i32m2(mask, dest, src, vl); +vint32m2_t test_vcompress_vm_i32m2 (vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i32m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( @@ -155,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { - return vcompress_vm_i32m4(mask, dest, src, vl); +vint32m4_t test_vcompress_vm_i32m4 (vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( @@ -164,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { - return vcompress_vm_i32m8(mask, dest, src, vl); +vint32m8_t test_vcompress_vm_i32m8 (vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( @@ -173,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { - return vcompress_vm_i64m1(mask, dest, src, vl); +vint64m1_t test_vcompress_vm_i64m1 (vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( @@ -182,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { - return vcompress_vm_i64m2(mask, dest, src, vl); +vint64m2_t test_vcompress_vm_i64m2 (vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( @@ -191,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { - return vcompress_vm_i64m4(mask, dest, src, vl); +vint64m4_t test_vcompress_vm_i64m4 (vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( @@ -200,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { - return vcompress_vm_i64m8(mask, dest, src, vl); +vint64m8_t test_vcompress_vm_i64m8 (vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( @@ -209,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { - return vcompress_vm_u8mf8(mask, dest, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8 (vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( @@ -218,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { - return vcompress_vm_u8mf4(mask, dest, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4 (vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2( @@ -227,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { - return vcompress_vm_u8mf2(mask, dest, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2 (vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( @@ -236,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { - return vcompress_vm_u8m1(mask, dest, src, vl); +vuint8m1_t test_vcompress_vm_u8m1 (vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( @@ -245,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { - return vcompress_vm_u8m2(mask, dest, src, vl); +vuint8m2_t test_vcompress_vm_u8m2 (vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( @@ -254,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { - return vcompress_vm_u8m4(mask, dest, src, vl); +vuint8m4_t test_vcompress_vm_u8m4 (vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) { + return 
vcompress_vm_u8m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( @@ -263,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { - return vcompress_vm_u8m8(mask, dest, src, vl); +vuint8m8_t test_vcompress_vm_u8m8 (vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( @@ -272,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { - return vcompress_vm_u16mf4(mask, dest, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4 (vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( @@ -281,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { - return vcompress_vm_u16mf2(mask, dest, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2 (vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u16mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( @@ -290,8 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { - return vcompress_vm_u16m1(mask, dest, src, vl); +vuint16m1_t test_vcompress_vm_u16m1 (vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( @@ -299,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { - return vcompress_vm_u16m2(mask, dest, src, vl); +vuint16m2_t test_vcompress_vm_u16m2 (vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4( @@ -308,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { - return vcompress_vm_u16m4(mask, dest, src, vl); +vuint16m4_t test_vcompress_vm_u16m4 (vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( @@ -317,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t 
test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { - return vcompress_vm_u16m8(mask, dest, src, vl); +vuint16m8_t test_vcompress_vm_u16m8 (vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( @@ -326,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { - return vcompress_vm_u32mf2(mask, dest, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2 (vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( @@ -335,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { - return vcompress_vm_u32m1(mask, dest, src, vl); +vuint32m1_t test_vcompress_vm_u32m1 (vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( @@ -344,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { - return vcompress_vm_u32m2(mask, dest, src, vl); +vuint32m2_t test_vcompress_vm_u32m2 (vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( @@ -353,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { - return vcompress_vm_u32m4(mask, dest, src, vl); +vuint32m4_t test_vcompress_vm_u32m4 (vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8( @@ -362,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { - return vcompress_vm_u32m8(mask, dest, src, vl); +vuint32m8_t test_vcompress_vm_u32m8 (vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( @@ -371,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { - return vcompress_vm_u64m1(mask, dest, src, vl); +vuint64m1_t test_vcompress_vm_u64m1 (vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1(dest, src, mask, vl); } // 
CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( @@ -380,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { - return vcompress_vm_u64m2(mask, dest, src, vl); +vuint64m2_t test_vcompress_vm_u64m2 (vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u64m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( @@ -389,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { - return vcompress_vm_u64m4(mask, dest, src, vl); +vuint64m4_t test_vcompress_vm_u64m4 (vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( @@ -398,8 +398,62 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { - return vcompress_vm_u64m8(mask, dest, src, vl); +vuint64m8_t test_vcompress_vm_u64m8 (vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4 
(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( @@ -407,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2(mask, dest, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2 (vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1( @@ -416,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { - return vcompress_vm_f32m1(mask, dest, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1 (vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( @@ -425,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { - return vcompress_vm_f32m2(mask, dest, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2 (vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( @@ -434,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { - return vcompress_vm_f32m4(mask, dest, src, vl); +vfloat32m4_t test_vcompress_vm_f32m4 (vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( @@ -443,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { - return vcompress_vm_f32m8(mask, dest, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8 (vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( @@ -452,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t 
mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { - return vcompress_vm_f64m1(mask, dest, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1 (vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( @@ -461,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { - return vcompress_vm_f64m2(mask, dest, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2 (vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4( @@ -470,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { - return vcompress_vm_f64m4(mask, dest, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4 (vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( @@ -479,62 +533,125 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { - return vcompress_vm_f64m8(mask, dest, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8 (vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8(dest, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vcompress_vm_f16mf4 (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) { - return vcompress_vm_f16mf4(mask, dest, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i8mf8_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vcompress_vm_f16mf2 (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) { - return vcompress_vm_f16mf2(mask, dest, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i8mf4_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( +// CHECK-RV64-LABEL: 
@test_vcompress_vm_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vcompress_vm_f16m1 (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) { - return vcompress_vm_f16m1(mask, dest, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i8mf2_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vcompress_vm_f16m2 (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) { - return vcompress_vm_f16m2(mask, dest, src, vl); +vint8m1_t test_vcompress_vm_i8m1_tu (vint8m1_t merge, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i8m1_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vcompress_vm_f16m4 (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) { - return vcompress_vm_f16m4(mask, dest, src, vl); +vint8m2_t test_vcompress_vm_i8m2_tu (vint8m2_t merge, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vcompress_vm_i8m4_tu (vint8m4_t merge, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vcompress_vm_i8m8_tu (vint8m8_t merge, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t 
test_vcompress_vm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vcompress_vm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vcompress_vm_i16m1_tu (vint16m1_t merge, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vcompress_vm_i16m2_tu (vint16m2_t merge, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i16m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) { - return vcompress_vm_f16m8(mask, dest, src, vl); +vint16m4_t test_vcompress_vm_i16m4_tu (vint16m4_t merge, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vcompress_vm_i16m8_tu (vint16m8_t merge, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8_tu(merge, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu( @@ -542,8 +659,197 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) { - return vcompress_vm_i32mf2_tu(mask, merge, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i32mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vcompress_vm_i32m1_tu (vint32m1_t merge, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vcompress_vm_i32m2_tu (vint32m2_t merge, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i32m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vcompress_vm_i32m4_tu (vint32m4_t merge, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vcompress_vm_i32m8_tu (vint32m8_t merge, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vcompress_vm_i64m1_tu (vint64m1_t merge, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vcompress_vm_i64m2_tu (vint64m2_t merge, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vcompress_vm_i64m4_tu (vint64m4_t merge, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vcompress_vm_i64m8_tu (vint64m8_t merge, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcompress_vm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcompress_vm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vcompress_vm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcompress_vm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcompress_vm_u8m1_tu (vuint8m1_t merge, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcompress_vm_u8m2_tu (vuint8m2_t merge, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcompress_vm_u8m4_tu (vuint8m4_t merge, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u8m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcompress_vm_u8m8_tu (vuint8m8_t merge, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcompress_vm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcompress_vm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u16mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcompress_vm_u16m1_tu (vuint16m1_t merge, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t 
test_vcompress_vm_u16m2_tu (vuint16m2_t merge, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcompress_vm_u16m4_tu (vuint16m4_t merge, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcompress_vm_u16m8_tu (vuint16m8_t merge, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8_tu(merge, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu( @@ -551,42 +857,744 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) { - return vcompress_vm_u32mf2_tu(mask, merge, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2_tu(mask, merge, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_tu (vuint32m1_t merge, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vcompress_vm_i32mf2_ta(mask, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_tu (vuint32m2_t merge, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vcompress_vm_u32mf2_ta(mask, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_tu (vuint32m4_t merge, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcompress_vm_u32m8_tu (vuint32m8_t merge, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcompress_vm_u64m1_tu (vuint64m1_t merge, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcompress_vm_u64m2_tu (vuint64m2_t merge, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u64m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcompress_vm_u64m4_tu (vuint64m4_t merge, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcompress_vm_u64m8_tu (vuint64m8_t merge, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vcompress_vm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vcompress_vm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vcompress_vm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vcompress_vm_i8mf8_ta (vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i8mf8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vcompress_vm_i8mf4_ta (vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i8mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vcompress_vm_i8mf2_ta (vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i8mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vcompress_vm_i8m1_ta (vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i8m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vcompress_vm_i8m2_ta (vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vcompress_vm_i8m4_ta (vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vcompress_vm_i8m8_ta (vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vcompress_vm_i16mf4_ta (vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vcompress_vm_i16mf2_ta (vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vcompress_vm_i16m1_ta (vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vcompress_vm_i16m2_ta (vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i16m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vcompress_vm_i16m4_ta (vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vcompress_vm_i16m8_ta (vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vcompress_vm_i32mf2_ta (vint32mf2_t src, vbool64_t mask, size_t vl) { + return 
vcompress_vm_i32mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vcompress_vm_i32m1_ta (vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vcompress_vm_i32m2_ta (vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i32m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vcompress_vm_i32m4_ta (vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vcompress_vm_i32m8_ta (vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vcompress_vm_i64m1_ta (vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vcompress_vm_i64m2_ta (vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vcompress_vm_i64m4_ta (vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vcompress_vm_i64m8_ta (vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcompress_vm_u8mf8_ta (vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcompress_vm_u8mf4_ta (vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcompress_vm_u8mf2_ta (vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcompress_vm_u8m1_ta (vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcompress_vm_u8m2_ta (vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcompress_vm_u8m4_ta (vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u8m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcompress_vm_u8m8_ta (vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcompress_vm_u16mf4_ta (vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcompress_vm_u16mf2_ta (vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u16mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcompress_vm_u16m1_ta (vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcompress_vm_u16m2_ta (vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcompress_vm_u16m4_ta (vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcompress_vm_u16m8_ta (vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcompress_vm_u32mf2_ta (vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcompress_vm_u32m1_ta (vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcompress_vm_u32m2_ta (vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcompress_vm_u32m4_ta (vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcompress_vm_u32m8_ta (vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcompress_vm_u64m1_ta (vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcompress_vm_u64m2_ta (vuint64m2_t src, vbool32_t mask, 
size_t vl) { + return vcompress_vm_u64m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcompress_vm_u64m4_ta (vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcompress_vm_u64m8_ta (vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4_ta (vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2_ta (vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1_ta (vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2_ta (vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_ta (vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_ta (vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vcompress_vm_f32mf2_ta (vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vcompress_vm_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_ta (vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_ta (vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_ta (vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vcompress_vm_f32m8_ta (vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_ta (vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_ta (vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_ta (vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2_ta(mask, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8_ta (vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8_ta(src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c @@ -8,1026 +8,1163 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, - size_t vl) { - return vmerge_vvm_i8mf8(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8mf8(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, - size_t vl) { - return vmerge_vvm_i8mf4(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8mf4(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, - size_t vl) { - return vmerge_vvm_i8mf2(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8mf2(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, - size_t vl) { - return vmerge_vvm_i8m1(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m1(mask, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, - size_t vl) { - return vmerge_vvm_i8m2(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m2(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t 
op2, - size_t vl) { - return vmerge_vvm_i8m4(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m4(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, - size_t vl) { - return vmerge_vvm_i8m8(mask, op1, op2, vl); +vint8m8_t test_vmerge_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m8(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, - vint16mf4_t op2, size_t vl) { - return vmerge_vvm_i16mf4(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16mf4(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return 
vmerge_vxm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, - vint16mf2_t op2, size_t vl) { - return vmerge_vvm_i16mf2(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16mf2(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2 (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, - size_t vl) { - return vmerge_vvm_i16m1(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m1(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, - size_t vl) { - return vmerge_vvm_i16m2(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m2(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, - size_t vl) { - return vmerge_vvm_i16m4(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m4(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4 (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, - size_t vl) { - return vmerge_vvm_i16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, - vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32mf2(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, - size_t vl) { - return vmerge_vvm_i32m1(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m1(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, - size_t vl) { - return vmerge_vvm_i32m2(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m2(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, - size_t vl) { - return vmerge_vvm_i32m4(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m4(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, - size_t vl) { - return vmerge_vvm_i32m8(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m8(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, 
vint64m1_t op2, - size_t vl) { - return vmerge_vvm_i64m1(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m1(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, - size_t vl) { - return vmerge_vvm_i64m2(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m2(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2 (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, - size_t vl) { - return vmerge_vvm_i64m4(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m4(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4 (vint64m4_t op1, int64_t 
op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, - size_t vl) { - return vmerge_vvm_i64m8(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m8(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, - vuint8mf8_t op2, size_t vl) { - return vmerge_vvm_u8mf8(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8mf8(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, - vuint8mf4_t op2, size_t vl) { - return vmerge_vvm_u8mf4(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8mf4(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, - vuint8mf2_t op2, size_t vl) { - return vmerge_vvm_u8mf2(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8mf2(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, - size_t vl) { - return vmerge_vvm_u8m1(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8m1(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
- size_t vl) {
- return vmerge_vvm_u8m2(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_u8m2(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
- size_t vl) {
- return vmerge_vxm_u8m2(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_u8m2(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
- size_t vl) {
- return vmerge_vvm_u8m4(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_u8m4(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
- size_t vl) {
- return vmerge_vxm_u8m4(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_u8m4(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
- size_t vl) {
- return vmerge_vvm_u8m8(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vvm_u8m8(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8m8(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - vuint16mf4_t op2, size_t vl) { - return vmerge_vvm_u16mf4(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf4(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - vuint16mf2_t op2, size_t vl) { - return vmerge_vvm_u16mf2(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf2(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, - vuint16m1_t 
op2, size_t vl) { - return vmerge_vvm_u16m1(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m1(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, - vuint16m2_t op2, size_t vl) { - return vmerge_vvm_u16m2(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m2(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, - vuint16m4_t op2, size_t vl) { - return vmerge_vvm_u16m4(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m4(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4 
(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, - vuint16m8_t op2, size_t vl) { - return vmerge_vvm_u16m8(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m8(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, - vuint32m1_t op2, size_t vl) { - return vmerge_vvm_u32m1(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return 
vmerge_vvm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m1(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, - vuint32m2_t op2, size_t vl) { - return vmerge_vvm_u32m2(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m2(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, - vuint32m4_t op2, size_t vl) { - return vmerge_vvm_u32m4(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m4(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, - vuint32m8_t op2, size_t vl) { - return vmerge_vvm_u32m8(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m8(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, - vuint64m1_t op2, size_t vl) { - return vmerge_vvm_u64m1(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m1(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, - vuint64m2_t op2, size_t vl) { - return vmerge_vvm_u64m2(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m2(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, - vuint64m4_t op2, size_t vl) { - return vmerge_vvm_u64m4(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m4(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, - vuint64m8_t op2, size_t vl) { - return vmerge_vvm_u64m8(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m8(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t 
test_vmerge_vvm_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_f16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, - vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, - vfloat32m1_t op2, size_t vl) { - return vmerge_vvm_f32m1(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) 
{ + return vmerge_vvm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, - vfloat32m2_t op2, size_t vl) { - return vmerge_vvm_f32m2(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, - vfloat32m4_t op2, size_t vl) { - return vmerge_vvm_f32m4(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, - vfloat32m8_t op2, size_t vl) { - return vmerge_vvm_f32m8(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, - vfloat64m1_t op2, size_t vl) { - return vmerge_vvm_f64m1(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, - vfloat64m2_t op2, size_t vl) { - return vmerge_vvm_f64m2(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2(op1, op2, mask, vl); } // 
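Editorial note on this hunk: the updated tests capture two related signature changes. For the unmasked vmerge/vfmerge builtins the mask moves from the first argument to the position just before vl, and for the tail-undisturbed (_tu) variants the merge (passthru) vector becomes the leading argument, as the _tu tests later in this hunk show. A minimal before/after sketch in C, using the vmerge_vvm_u32mf2 and vmerge_vvm_i32mf2_tu builtins that these tests exercise; the wrapper and parameter names (demo_vmerge, demo_vmerge_tu) are illustrative only, and every other SEW/LMUL combination follows the same pattern:

  #include <riscv_vector.h>
  #include <stddef.h>

  vuint32mf2_t demo_vmerge(vuint32mf2_t op1, vuint32mf2_t op2,
                           vbool64_t mask, size_t vl) {
    // Before this patch the same call was written
    //   vmerge_vvm_u32mf2(mask, op1, op2, vl);
    // afterwards the mask sits immediately before vl.
    return vmerge_vvm_u32mf2(op1, op2, mask, vl);
  }

  vint32mf2_t demo_vmerge_tu(vint32mf2_t merge, vint32mf2_t op1,
                             vint32mf2_t op2, vbool64_t mask, size_t vl) {
    // Tail-undisturbed variant: merge supplies the tail elements (indices >= vl)
    // and now leads the argument list; the mask again comes right before vl.
    return vmerge_vvm_i32mf2_tu(merge, op1, op2, mask, vl);
  }

In the tail-agnostic default the passthru operand of the generated IR call is undef, as the updated CHECK lines in this hunk show, while the _tu tests check that the merge argument is forwarded as the passthru operand.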
CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, - vfloat64m4_t op2, size_t vl) { - return vmerge_vvm_f64m4(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, - vfloat64m8_t op2, size_t vl) { - return vmerge_vvm_f64m8(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_vvm_f16mf4(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_vvm_f16mf2(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t 
test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_vvm_f16m1(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge_vvm_f16m2(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_vvm_f16m4(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vxm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vvm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vxm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vvm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vxm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vvm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vxm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vvm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vxm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vvm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vxm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vvm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vxm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vvm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vxm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vvm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vxm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vvm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_vvm_f16m8(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return 
vmerge_vxm_i16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vvm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vxm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -1035,8 +1172,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -1044,78 +1181,1608 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32mf2_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32mf2_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t 
vl) { + return vmerge_vxm_i32m4_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2_tu(mask, merge, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vxm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vvm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vxm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vvm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vxm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vvm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vxm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vvm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vxm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vvm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vxm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vvm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u8mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vxm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( 
[[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vvm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vxm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vvm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vxm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vvm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u8m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vxm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u8m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vvm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u8m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vxm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u8m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vvm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_u8m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vxm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vvm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vxm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vvm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vxm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vvm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vxm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vvm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vxm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vvm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vxm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vvm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vxm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vvm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vxm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vvm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u32m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vxm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vvm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vxm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vvm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vxm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vvm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vxm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmerge_vvm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vvm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vxm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vvm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vxm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vvm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vxm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vvm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vxm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8_tu(merge, op1, op2, mask, 
vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vvm_i8mf8_ta (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vxm_i8mf8_ta (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vvm_i8mf4_ta (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vxm_i8mf4_ta (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vvm_i8mf2_ta (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vxm_i8mf2_ta (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vvm_i8m1_ta (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vxm_i8m1_ta (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t 
test_vmerge_vvm_i8m2_ta (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vxm_i8m2_ta (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vvm_i8m4_ta (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vxm_i8m4_ta (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vvm_i8m8_ta (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vxm_i8m8_ta (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vvm_i16mf4_ta (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vxm_i16mf4_ta (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vvm_i16mf2_ta (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vxm_i16mf2_ta (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vvm_i16m1_ta (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vxm_i16m1_ta (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vvm_i16m2_ta (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vxm_i16m2_ta (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vvm_i16m4_ta (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vxm_i16m4_ta (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vvm_i16m8_ta (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vxm_i16m8_ta (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return 
vmerge_vxm_i16m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmerge_vvm_i32mf2_ta (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmerge_vxm_i32mf2_ta (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmerge_vvm_i32m1_ta (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmerge_vxm_i32m1_ta (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmerge_vvm_i32m2_ta (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmerge_vxm_i32m2_ta (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmerge_vvm_i32m4_ta (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmerge_vxm_i32m4_ta (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vvm_i32m8_ta (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vxm_i32m8_ta (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vvm_i64m1_ta (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vxm_i64m1_ta (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vvm_i64m2_ta (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vxm_i64m2_ta (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vvm_i64m4_ta (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vxm_i64m4_ta (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vvm_i64m8_ta (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vxm_i64m8_ta (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vvm_u8mf8_ta (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vxm_u8mf8_ta (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vvm_u8mf4_ta (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u8mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vxm_u8mf4_ta (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vvm_u8mf2_ta (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmerge_vxm_u8mf2_ta (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vvm_u8m1_ta (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmerge_vxm_u8m1_ta (vuint8m1_t op1, uint8_t op2, 
vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vvm_u8m2_ta (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u8m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmerge_vxm_u8m2_ta (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u8m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vvm_u8m4_ta (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u8m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmerge_vxm_u8m4_ta (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u8m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vvm_u8m8_ta (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_u8m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmerge_vxm_u8m8_ta (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vvm_u16mf4_ta (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmerge_vxm_u16mf4_ta (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vvm_u16mf2_ta (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmerge_vxm_u16mf2_ta (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vvm_u16m1_ta (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmerge_vxm_u16m1_ta (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vvm_u16m2_ta (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmerge_vxm_u16m2_ta (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vvm_u16m4_ta (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmerge_vxm_u16m4_ta (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vvm_u16m8_ta (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8_ta(op1, op2, mask, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmerge_vxm_u16m8_ta (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vvm_u32mf2_ta (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmerge_vxm_u32mf2_ta (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vvm_u32m1_ta (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u32m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmerge_vxm_u32m1_ta (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vvm_u32m2_ta (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmerge_vxm_u32m2_ta (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vvm_u32m4_ta (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vxm_u32m4_ta (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vvm_u32m8_ta (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vxm_u32m8_ta (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vvm_u64m1_ta (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vxm_u64m1_ta (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vvm_u64m2_ta (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vxm_u64m2_ta (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vvm_u64m4_ta (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vxm_u64m4_ta (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vvm_u64m8_ta (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vxm_u64m8_ta (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_f16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmerge_vvm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmerge_vvm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmerge_vvm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmerge_vvm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmerge_vvm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmerge_vvm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmerge_vvm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmerge_vvm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmerge_vvm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { 
+ return vmerge_vvm_f64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmerge_vvm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_ta (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_ta (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1_ta (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2_ta (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4_ta (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8_ta (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_f16m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmerge_vvm_f32mf2_ta (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vmerge_vvm_f32m1_ta (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f32m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vmerge_vvm_f32m2_ta (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f32m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vmerge_vvm_f32m4_ta (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f32m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vmerge_vvm_f32m8_ta (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f32m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vmerge_vvm_f64m1_ta (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f64m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vmerge_vvm_f64m2_ta (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f64m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vmerge_vvm_f64m4_ta (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f64m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge_vvm_f32mf2_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_ta (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f64m8_ta(op1, op2, mask, vl);
 }