diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -137,8 +137,6 @@ def NonePolicy : PolicyScheme<0>; def HasPassthruOperand : PolicyScheme<1>; def HasPolicyOperand : PolicyScheme<2>; -// Specail case for passthru operand which is not a first opeand. -def HasPassthruOperandAtIdx1 : PolicyScheme<3>; class RVVBuiltin { @@ -1852,20 +1850,19 @@ // 12.15. Vector Integer Merge Instructions -// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl) +// C/C++ Operand: (op1, op2, mask, vl), Intrinsic: (passthru, op1, op2, mask, vl) let HasMasked = false, - UnMaskedPolicyScheme = HasPassthruOperandAtIdx1, + UnMaskedPolicyScheme = HasPassthruOperand, MaskedPolicyScheme = NonePolicy, ManualCodegen = [{ - std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); // insert poison passthru if (PolicyAttrs == TAIL_AGNOSTIC) Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()}; }] in { defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil", - [["vvm", "v", "vmvv"], - ["vxm", "v", "vmve"], - ["vvm", "Uv", "UvmUvUv"], - ["vxm", "Uv", "UvmUvUe"]]>; + [["vvm", "v", "vvvm"], + ["vxm", "v", "vvem"], + ["vvm", "Uv", "UvUvUvm"], + ["vxm", "Uv", "UvUvUem"]]>; } // 12.16. Vector Integer Move Instructions @@ -1996,19 +1993,18 @@ // 14.15. Vector Floating-Point Merge Instruction -// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl) +// C/C++ Operand: (op1, op2, mask, vl), Builtin: (op1, op2, mask, vl) let HasMasked = false, - UnMaskedPolicyScheme = HasPassthruOperandAtIdx1, + UnMaskedPolicyScheme = HasPassthruOperand, MaskedPolicyScheme = NonePolicy, ManualCodegen = [{ - std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); // insert poison passthru if (PolicyAttrs == TAIL_AGNOSTIC) Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()}; }] in { defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd", - [["vvm", "v", "vmvv"]]>; + [["vvm", "v", "vvvm"]]>; defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd", - [["vfm", "v", "vmve"]]>; + [["vfm", "v", "vvem"]]>; } // 14.16. Vector Floating-Point Move Instruction @@ -2196,10 +2192,9 @@ // 17.5. Vector Compress Instruction let IsPrototypeDefaultTU = true, HasMasked = false, - UnMaskedPolicyScheme = HasPassthruOperandAtIdx1, + UnMaskedPolicyScheme = HasPassthruOperand, MaskedPolicyScheme = NonePolicy, ManualCodegen = [{ - std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); // insert poison passthru if (PolicyAttrs == TAIL_AGNOSTIC) Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType)); @@ -2207,10 +2202,10 @@ }] in { // signed and floating type defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd", - [["vm", "v", "vmvv"]]>; + [["vm", "v", "vvvm"]]>; // unsigned type defm vcompress : RVVOutBuiltinSet<"vcompress", "csil", - [["vm", "Uv", "UvmUvUv"]]>; + [["vm", "Uv", "UvUvUvm"]]>; } // Miscellaneous diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h --- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h +++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h @@ -366,9 +366,6 @@ // Passthru operand is at first parameter in C builtin. HasPassthruOperand, HasPolicyOperand, - // Special case for vmerge, the passthru operand is second - // parameter in C builtin. 
- HasPassthruOperandAtIdx1, }; // TODO refactor RVVIntrinsic class design after support all intrinsic diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp --- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp +++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp @@ -964,15 +964,6 @@ else if (PolicyAttrs.isTAPolicy() && HasPassthruOp && IsPrototypeDefaultTU) NewPrototype.erase(NewPrototype.begin() + 1); - if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) { - if (PolicyAttrs.isTUPolicy() && !IsPrototypeDefaultTU) { - // Insert undisturbed output to index 1 - NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]); - } else if (PolicyAttrs.isTAPolicy() && IsPrototypeDefaultTU) { - // Erase passthru for TA policy - NewPrototype.erase(NewPrototype.begin() + 2); - } - } } else if (PolicyAttrs.isTUPolicy() && HasPassthruOp) { // NF > 1 cases for segment load operations. // Convert diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c @@ -1,6 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> @@ -10,8 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8mf8_t test_vcompress_vm_i8mf8 (vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4( @@ -19,8 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8mf4_t test_vcompress_vm_i8mf4 (vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2( @@ -28,8 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8mf2_t test_vcompress_vm_i8mf2 (vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1( @@ -37,8 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { - 
return vcompress(mask, dest, src, vl); +vint8m1_t test_vcompress_vm_i8m1 (vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2( @@ -46,8 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m2_t test_vcompress_vm_i8m2 (vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( @@ -55,8 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m4_t test_vcompress_vm_i8m4 (vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( @@ -64,8 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint8m8_t test_vcompress_vm_i8m8 (vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( @@ -73,8 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16mf4_t test_vcompress_vm_i16mf4 (vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2( @@ -82,8 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16mf2_t test_vcompress_vm_i16mf2 (vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( @@ -91,8 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m1_t test_vcompress_vm_i16m1 (vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( @@ -100,8 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, 
vint16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m2_t test_vcompress_vm_i16m2 (vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( @@ -109,8 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m4_t test_vcompress_vm_i16m4 (vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( @@ -118,8 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint16m8_t test_vcompress_vm_i16m8 (vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( @@ -127,8 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2 (vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( @@ -136,8 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m1_t test_vcompress_vm_i32m1 (vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( @@ -145,8 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m2_t test_vcompress_vm_i32m2 (vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( @@ -154,8 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m4_t test_vcompress_vm_i32m4 (vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( @@ -163,8 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint32m8_t test_vcompress_vm_i32m8 (vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( @@ -172,8 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m1_t test_vcompress_vm_i64m1 (vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( @@ -181,8 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m2_t test_vcompress_vm_i64m2 (vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( @@ -190,8 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m4_t test_vcompress_vm_i64m4 (vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( @@ -199,8 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vint64m8_t test_vcompress_vm_i64m8 (vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( @@ -208,8 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf8_t test_vcompress_vm_u8mf8 (vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( @@ -217,8 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf4_t test_vcompress_vm_u8mf4 (vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2( @@ -226,8 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8mf2_t test_vcompress_vm_u8mf2 (vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( @@ -235,8 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m1_t test_vcompress_vm_u8m1 (vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( @@ -244,8 +245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m2_t test_vcompress_vm_u8m2 (vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( @@ -253,8 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m4_t test_vcompress_vm_u8m4 (vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( @@ -262,8 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint8m8_t test_vcompress_vm_u8m8 (vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( @@ -271,8 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16mf4_t test_vcompress_vm_u16mf4 (vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( @@ -280,8 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16mf2_t test_vcompress_vm_u16mf2 (vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( @@ -289,8 +290,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m1_t test_vcompress_vm_u16m1 (vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( @@ -298,8 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m2_t test_vcompress_vm_u16m2 (vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4( @@ -307,8 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m4_t test_vcompress_vm_u16m4 (vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( @@ -316,8 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint16m8_t test_vcompress_vm_u16m8 (vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( @@ -325,8 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2 (vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( @@ -334,8 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m1_t test_vcompress_vm_u32m1 (vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( @@ -343,8 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m2_t test_vcompress_vm_u32m2 (vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) { + 
return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( @@ -352,8 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m4_t test_vcompress_vm_u32m4 (vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8( @@ -361,8 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint32m8_t test_vcompress_vm_u32m8 (vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( @@ -370,8 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m1_t test_vcompress_vm_u64m1 (vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( @@ -379,8 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m2_t test_vcompress_vm_u64m2 (vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( @@ -388,8 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m4_t test_vcompress_vm_u64m4 (vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( @@ -397,8 +398,62 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vuint64m8_t test_vcompress_vm_u64m8 (vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return 
vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( @@ -406,8 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2 (vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1( @@ -415,8 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m1_t test_vcompress_vm_f32m1 (vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( @@ -424,8 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m2_t test_vcompress_vm_f32m2 (vfloat32m2_t dest, vfloat32m2_t src, 
vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( @@ -433,8 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m4_t test_vcompress_vm_f32m4 (vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( @@ -442,8 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat32m8_t test_vcompress_vm_f32m8 (vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( @@ -451,8 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m1_t test_vcompress_vm_f64m1 (vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( @@ -460,8 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m2_t test_vcompress_vm_f64m2 (vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4( @@ -469,8 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m4_t test_vcompress_vm_f64m4 (vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( @@ -478,8 +533,125 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { - return vcompress(mask, dest, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8 (vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress(dest, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vcompress_vm_i8mf8_tu (vint8mf8_t merge, 
vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vcompress_vm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vcompress_vm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vcompress_vm_i8m1_tu (vint8m1_t merge, vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vcompress_vm_i8m2_tu (vint8m2_t merge, vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vcompress_vm_i8m4_tu (vint8m4_t merge, vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vcompress_vm_i8m8_tu (vint8m8_t merge, vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vcompress_vm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vcompress_vm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16m1_t test_vcompress_vm_i16m1_tu (vint16m1_t merge, vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vcompress_vm_i16m2_tu (vint16m2_t merge, vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vcompress_vm_i16m4_tu (vint16m4_t merge, vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vcompress_vm_i16m8_tu (vint16m8_t merge, vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu( @@ -487,8 +659,197 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) { - return vcompress_tu(mask, merge, src, vl); +vint32mf2_t test_vcompress_vm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vcompress_vm_i32m1_tu (vint32m1_t merge, vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vcompress_vm_i32m2_tu (vint32m2_t merge, vint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vcompress_vm_i32m4_tu (vint32m4_t merge, vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vcompress_vm_i32m8_tu (vint32m8_t merge, vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// 
CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vcompress_vm_i64m1_tu (vint64m1_t merge, vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vcompress_vm_i64m2_tu (vint64m2_t merge, vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vcompress_vm_i64m4_tu (vint64m4_t merge, vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vcompress_vm_i64m8_tu (vint64m8_t merge, vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcompress_vm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcompress_vm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcompress_vm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcompress_vm_u8m1_tu (vuint8m1_t merge, vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcompress_vm_u8m2_tu (vuint8m2_t merge, vuint8m2_t src, 
vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcompress_vm_u8m4_tu (vuint8m4_t merge, vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcompress_vm_u8m8_tu (vuint8m8_t merge, vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcompress_vm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcompress_vm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcompress_vm_u16m1_tu (vuint16m1_t merge, vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcompress_vm_u16m2_tu (vuint16m2_t merge, vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcompress_vm_u16m4_tu (vuint16m4_t merge, vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcompress_vm_u16m8_tu (vuint16m8_t merge, vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu( @@ -496,42 +857,744 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) { - return vcompress_tu(mask, merge, src, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) { - return vcompress_tu(mask, merge, src, vl); +vuint32m1_t test_vcompress_vm_u32m1_tu (vuint32m1_t merge, vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m2_t test_vcompress_vm_u32m2_tu (vuint32m2_t merge, vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vcompress_ta(mask, src, vl); +vuint32m4_t test_vcompress_vm_u32m4_tu (vuint32m4_t merge, vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); } -// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcompress_vm_u32m8_tu (vuint32m8_t merge, vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcompress_vm_u64m1_tu (vuint64m1_t merge, vuint64m1_t src, vbool64_t mask, 
size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcompress_vm_u64m2_tu (vuint64m2_t merge, vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcompress_vm_u64m4_tu (vuint64m4_t merge, vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcompress_vm_u64m8_tu (vuint64m8_t merge, vuint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vcompress_vm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vcompress_vm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( +// 
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vcompress_vm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vcompress_vm_i8mf8_ta (vint8mf8_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vcompress_vm_i8mf4_ta (vint8mf4_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vcompress_vm_i8mf2_ta (vint8mf2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vcompress_vm_i8m1_ta (vint8m1_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vcompress_vm_i8m2_ta (vint8m2_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vcompress_vm_i8m4_ta (vint8m4_t src, vbool2_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vcompress_vm_i8m8_ta (vint8m8_t src, vbool1_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vcompress_vm_i16mf4_ta (vint16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vcompress_vm_i16mf2_ta (vint16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vcompress_vm_i16m1_ta (vint16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vcompress_vm_i16m2_ta (vint16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vcompress_vm_i16m4_ta (vint16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vcompress_vm_i16m8_ta (vint16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_ta (vint32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vcompress_vm_i32m1_ta (vint32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vcompress_vm_i32m2_ta (vint32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vcompress_vm_i32m4_ta (vint32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vcompress_vm_i32m8_ta (vint32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vcompress_vm_i64m1_ta (vint64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vcompress_vm_i64m2_ta (vint64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vcompress_vm_i64m4_ta (vint64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vcompress_vm_i64m8_ta (vint64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vcompress_vm_u8mf8_ta (vuint8mf8_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vcompress_vm_u8mf4_ta (vuint8mf4_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vcompress_vm_u8mf2_ta (vuint8mf2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vcompress_vm_u8m1_ta (vuint8m1_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vcompress_vm_u8m2_ta (vuint8m2_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vcompress_vm_u8m4_ta (vuint8m4_t src, vbool2_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vcompress_vm_u8m8_ta (vuint8m8_t src, vbool1_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vcompress_vm_u16mf4_ta (vuint16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vcompress_vm_u16mf2_ta (vuint16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vcompress_vm_u16m1_ta (vuint16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vcompress_vm_u16m2_ta (vuint16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vcompress_vm_u16m4_ta (vuint16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vcompress_vm_u16m8_ta (vuint16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_ta (vuint32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vcompress_vm_u32m1_ta (vuint32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vcompress_vm_u32m2_ta (vuint32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vcompress_vm_u32m4_ta (vuint32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vcompress_vm_u32m8_ta (vuint32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vcompress_vm_u64m1_ta (vuint64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vcompress_vm_u64m2_ta (vuint64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vcompress_vm_u64m4_ta (vuint64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vcompress_vm_u64m8_ta (vuint64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vcompress_vm_f16mf4_ta (vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vcompress_vm_f16mf2_ta (vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vcompress_vm_f16m1_ta (vfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vcompress_vm_f16m2_ta (vfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vcompress_vm_f16m4_ta (vfloat16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vcompress_vm_f16m8_ta (vfloat16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta (vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vcompress_vm_f32m1_ta (vfloat32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vcompress_vm_f32m2_ta (vfloat32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vcompress_vm_f32m4_ta (vfloat32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vcompress_vm_f32m8_ta (vfloat32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vcompress_vm_f64m1_ta (vfloat64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vcompress_vm_f64m2_ta (vfloat64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vcompress_vm_f64m4_ta (vfloat64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vcompress_ta(mask, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_ta (vfloat64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_ta(src, mask, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -1,18 +1,72 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include

+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmerge_vfm_f16m1 (vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmerge_vfm_f16m2 (vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmerge_vfm_f16m4 (vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmerge_vfm_f16m8 (vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
- float op2, size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2 (vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
@@ -20,9 +74,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
- size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1 (vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
@@ -30,9 +83,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
- size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2 (vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
@@ -40,9 +92,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
- size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4 (vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
@@ -50,9 +101,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
- size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8 (vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
@@ -60,9 +110,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
- double op2, size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1 (vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
@@ -70,9 +119,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
- double op2, size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2 (vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
@@ -80,9 +128,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
- double op2, size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4 (vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
@@ -90,9 +137,62 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
- size_t vl) {
- return vfmerge(mask, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8 (vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+ return vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmerge_vfm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmerge_vfm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmerge_vfm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmerge_vfm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
@@ -100,8 +200,134 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmerge_tu(mask, merge, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfmerge_vfm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfmerge_vfm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfmerge_vfm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfmerge_vfm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfmerge_vfm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfmerge_vfm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfmerge_vfm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfmerge_vfm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+ return vfmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta (vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta (vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmerge_vfm_f16m1_ta (vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmerge_vfm_f16m2_ta (vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]],
half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmerge_vfm_f16m4_ta (vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmerge_vfm_f16m8_ta (vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
@@ -109,6 +335,78 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmerge_ta(mask, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta (vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfmerge_vfm_f32m1_ta (vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfmerge_vfm_f32m2_ta (vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfmerge_vfm_f32m4_ta (vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfmerge_vfm_f32m8_ta (vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfmerge_vfm_f64m1_ta (vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfmerge_vfm_f64m2_ta (vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfmerge_vfm_f64m4_ta (vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfmerge_vfm_f64m8_ta (vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+ return vfmerge_ta(op1, op2, mask, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -1,6 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include

@@ -10,9 +11,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
@@ -20,9 +20,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
@@ -30,9 +29,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
@@ -40,9 +38,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8mf4_t
test_vmerge_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
@@ -50,9 +47,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
@@ -60,9 +56,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
@@ -70,9 +65,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
@@ -80,9 +74,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
@@ -90,9 +83,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
@@ -100,9 +92,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
@@ -110,9 +101,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2,
vbool2_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
@@ -120,9 +110,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
@@ -130,9 +119,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
@@ -140,9 +128,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
@@ -150,9 +137,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
- vint16mf4_t op2, size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
@@ -160,9 +146,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
@@ -170,9 +155,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
- vint16mf2_t op2, size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
@@ -180,9 +164,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2 (vint16mf2_t op1,
int16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
@@ -190,9 +173,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
@@ -200,9 +182,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
@@ -210,9 +191,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
@@ -220,9 +200,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
@@ -230,9 +209,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
@@ -240,9 +218,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4 (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
@@ -250,9 +227,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m8_t
test_vmerge_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
@@ -260,9 +236,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
@@ -270,9 +245,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
- vint32mf2_t op2, size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
@@ -280,9 +254,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
@@ -290,9 +263,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
@@ -300,9 +272,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
@@ -310,9 +281,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
- size_t vl) {
- return vmerge(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge(op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
@@ -320,9 +290,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
- size_t vl) {
- return
vmerge(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( @@ -330,9 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( @@ -340,9 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( @@ -350,9 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( @@ -360,9 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( @@ -370,9 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( @@ -380,9 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( @@ -390,9 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, - 
size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( @@ -400,9 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2 (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( @@ -410,9 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( @@ -420,9 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4 (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( @@ -430,9 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( @@ -440,9 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( @@ -450,9 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, - vuint8mf8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( @@ -460,9 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, 
vuint8mf8_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( @@ -470,9 +425,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, - vuint8mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( @@ -480,9 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( @@ -490,9 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, - vuint8mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( @@ -500,9 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( @@ -510,9 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( @@ -520,9 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( @@ -530,9 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( @@ -540,9 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( @@ -550,9 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( @@ -560,9 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( @@ -570,9 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( @@ -580,9 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( @@ -590,9 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - vuint16mf4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( @@ -600,9 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( @@ -610,9 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - vuint16mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( @@ -620,9 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - uint16_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( @@ -630,9 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, - vuint16m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( @@ -640,9 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( @@ -650,9 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, - vuint16m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( @@ -660,9 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( @@ -670,9 +605,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, - vuint16m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( @@ -680,9 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4 (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( @@ -690,9 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, - vuint16m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( @@ -700,9 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( @@ -710,9 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - vuint32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( @@ -720,9 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - uint32_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( @@ -730,9 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, - vuint32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( @@ -740,9 +668,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( @@ -750,9 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, - vuint32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( @@ -760,9 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( @@ -770,9 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, - vuint32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( @@ -780,9 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( @@ -790,9 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, - vuint32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( @@ -800,9 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // 
CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( @@ -810,9 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, - vuint64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( @@ -820,9 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( @@ -830,9 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, - vuint64m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( @@ -840,9 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( @@ -850,9 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, - vuint64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( @@ -860,9 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( @@ -870,9 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, - vuint64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, 
vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( @@ -880,9 +794,62 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, - size_t vl) { - return vmerge(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( @@ -890,9 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, - vfloat32mf2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vmerge_vvm_f32m1( @@ -900,9 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, - vfloat32m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( @@ -910,9 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, - vfloat32m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( @@ -920,9 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, - vfloat32m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( @@ -930,9 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, - vfloat32m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( @@ -940,9 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, - vfloat64m1_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( @@ -950,9 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, - vfloat64m2_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( @@ -960,9 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, - vfloat64m4_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4 
(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( @@ -970,9 +929,242 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, - vfloat64m8_t op2, size_t vl) { - return vmerge(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vvm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmerge_vxm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vvm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmerge_vxm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vvm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vxm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vvm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, 
vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vxm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vvm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vxm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vvm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vxm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vvm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vxm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vvm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmerge_vxm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vxm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vvm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vxm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vvm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vxm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vvm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vxm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vvm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vxm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vvm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vxm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -980,8 +1172,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -989,78 +1181,1608 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
[[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_ta(mask, op1, op2, vl); +vint32m4_t test_vmerge_vvm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmerge_ta(mask, op1, 
op2, vl); +vint32m4_t test_vmerge_vxm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_tu(mask, merge, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmerge_vxm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vvm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmerge_vxm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vvm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmerge_vxm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vvm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmerge_vxm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vvm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmerge_vxm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vvm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmerge_vxm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vvm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmerge_vxm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], 
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vvm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vxm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vvm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vxm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vvm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vxm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vvm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vxm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vvm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vxm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vvm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vxm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vvm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vxm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vvm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vxm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vvm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vxm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vvm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vxm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vvm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vxm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vvm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vxm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vvm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vxm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vvm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vxm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vmerge_vvm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vmerge_vxm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vmerge_vvm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vmerge_vxm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmerge_vvm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmerge_vxm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmerge_vvm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmerge_vxm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmerge_vvm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmerge_vxm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmerge_vvm_i8mf8_ta (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmerge_vxm_i8mf8_ta (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmerge_vvm_i8mf4_ta (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmerge_vxm_i8mf4_ta (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmerge_vvm_i8mf2_ta (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmerge_vxm_i8mf2_ta (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmerge_vvm_i8m1_ta (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmerge_vxm_i8m1_ta (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmerge_vvm_i8m2_ta (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmerge_vxm_i8m2_ta (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmerge_vvm_i8m4_ta (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmerge_vxm_i8m4_ta (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vmerge_vvm_i8m8_ta (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vmerge_vxm_i8m8_ta (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vmerge_vvm_i16mf4_ta (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vmerge_vxm_i16mf4_ta (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vmerge_vvm_i16mf2_ta (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vmerge_vxm_i16mf2_ta (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmerge_vvm_i16m1_ta (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmerge_vxm_i16m1_ta (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmerge_vvm_i16m2_ta (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmerge_vxm_i16m2_ta (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmerge_vvm_i16m4_ta (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmerge_vxm_i16m4_ta (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmerge_vvm_i16m8_ta (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmerge_vxm_i16m8_ta (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_ta (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_ta (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmerge_vvm_i32m1_ta (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmerge_vxm_i32m1_ta (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmerge_vvm_i32m2_ta (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmerge_vxm_i32m2_ta (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmerge_vvm_i32m4_ta (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmerge_vxm_i32m4_ta (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmerge_vvm_i32m8_ta (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmerge_vxm_i32m8_ta (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmerge_vvm_i64m1_ta (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmerge_vxm_i64m1_ta (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmerge_vvm_i64m2_ta (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmerge_vxm_i64m2_ta (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmerge_vvm_i64m4_ta (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmerge_vxm_i64m4_ta (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmerge_vvm_i64m8_ta (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmerge_vxm_i64m8_ta (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vvm_u8mf8_ta (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vxm_u8mf8_ta (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vvm_u8mf4_ta (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vxm_u8mf4_ta (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vvm_u8mf2_ta (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vxm_u8mf2_ta (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vvm_u8m1_ta (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vxm_u8m1_ta (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vvm_u8m2_ta (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vxm_u8m2_ta (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vvm_u8m4_ta (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vxm_u8m4_ta (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vvm_u8m8_ta (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vxm_u8m8_ta (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vvm_u16mf4_ta (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vxm_u16mf4_ta (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vvm_u16mf2_ta (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vxm_u16mf2_ta (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vvm_u16m1_ta (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vxm_u16m1_ta (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vvm_u16m2_ta (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vxm_u16m2_ta (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vvm_u16m4_ta (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vxm_u16m4_ta (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vvm_u16m8_ta (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vxm_u16m8_ta (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vvm_u32m1_ta (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vxm_u32m1_ta (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vvm_u32m2_ta (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vxm_u32m2_ta (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vvm_u32m4_ta (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vxm_u32m4_ta (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vmerge_vvm_u32m8_ta (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vmerge_vxm_u32m8_ta (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vmerge_vvm_u64m1_ta (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vmerge_vxm_u64m1_ta (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmerge_vvm_u64m2_ta (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmerge_vxm_u64m2_ta (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmerge_vvm_u64m4_ta (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmerge_vxm_u64m4_ta (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmerge_vvm_u64m8_ta (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmerge_vxm_u64m8_ta (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vmerge_vvm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vmerge_vvm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vmerge_vvm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vmerge_vvm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vmerge_vvm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vmerge_vvm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vmerge_vvm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vmerge_vvm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu(
CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmerge_vvm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmerge_vvm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmerge_vvm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmerge_vvm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmerge_vvm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmerge_vvm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_ta (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_ta (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta( +// 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vmerge_vvm_f16m1_ta (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vmerge_vvm_f16m2_ta (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vmerge_vvm_f16m4_ta (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vmerge_vvm_f16m8_ta (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vmerge_vvm_f32m1_ta (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vmerge_vvm_f32m2_ta (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vmerge_vvm_f32m4_ta (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vmerge_vvm_f32m8_ta (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vmerge_vvm_f64m1_ta (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vmerge_vvm_f64m2_ta (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vmerge_vvm_f64m4_ta (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmerge_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_ta (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_ta(op1, op2, mask, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
@@ -11,8 +11,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) {
- return vcompress_vm_i8mf8(mask, dest, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8 (vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i8mf8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
@@ -20,8 +20,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) {
- return vcompress_vm_i8mf4(mask, dest, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4 (vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i8mf4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
@@ -29,8 +29,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) {
- return vcompress_vm_i8mf2(mask, dest, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2 (vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i8mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
@@ -38,8 +38,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) {
- return vcompress_vm_i8m1(mask, dest, src, vl);
+vint8m1_t test_vcompress_vm_i8m1 (vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i8m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
@@ -47,8 +47,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) {
- return vcompress_vm_i8m2(mask, dest, src, vl);
+vint8m2_t test_vcompress_vm_i8m2 (vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_i8m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
@@ -56,8 +56,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) {
- return vcompress_vm_i8m4(mask, dest, src, vl);
+vint8m4_t test_vcompress_vm_i8m4 (vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_i8m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
@@ -65,8 +65,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) {
- return vcompress_vm_i8m8(mask, dest, src, vl);
+vint8m8_t test_vcompress_vm_i8m8 (vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) {
+ return vcompress_vm_i8m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
@@ -74,8 +74,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) {
- return vcompress_vm_i16mf4(mask, dest, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4 (vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i16mf4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
@@ -83,8 +83,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) {
- return vcompress_vm_i16mf2(mask, dest, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2 (vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i16mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
@@ -92,8 +92,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) {
- return vcompress_vm_i16m1(mask, dest, src, vl);
+vint16m1_t test_vcompress_vm_i16m1 (vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i16m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
@@ -101,8 +101,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) {
- return vcompress_vm_i16m2(mask, dest, src, vl);
+vint16m2_t test_vcompress_vm_i16m2 (vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i16m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
@@ -110,8 +110,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) {
- return vcompress_vm_i16m4(mask, dest, src, vl);
+vint16m4_t test_vcompress_vm_i16m4 (vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_i16m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
@@ -119,8 +119,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) {
- return vcompress_vm_i16m8(mask, dest, src, vl);
+vint16m8_t test_vcompress_vm_i16m8 (vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_i16m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
@@ -128,8 +128,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) {
- return vcompress_vm_i32mf2(mask, dest, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2 (vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i32mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
@@ -137,8 +137,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) {
- return vcompress_vm_i32m1(mask, dest, src, vl);
+vint32m1_t test_vcompress_vm_i32m1 (vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i32m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
@@ -146,8 +146,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) {
- return vcompress_vm_i32m2(mask, dest, src, vl);
+vint32m2_t test_vcompress_vm_i32m2 (vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i32m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
@@ -155,8 +155,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) {
- return vcompress_vm_i32m4(mask, dest, src, vl);
+vint32m4_t test_vcompress_vm_i32m4 (vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i32m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
@@ -164,8 +164,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) {
- return vcompress_vm_i32m8(mask, dest, src, vl);
+vint32m8_t test_vcompress_vm_i32m8 (vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_i32m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
@@ -173,8 +173,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) {
- return vcompress_vm_i64m1(mask, dest, src, vl);
+vint64m1_t test_vcompress_vm_i64m1 (vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i64m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
@@ -182,8 +182,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) {
- return vcompress_vm_i64m2(mask, dest, src, vl);
+vint64m2_t test_vcompress_vm_i64m2 (vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i64m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
@@ -191,8 +191,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) {
- return vcompress_vm_i64m4(mask, dest, src, vl);
+vint64m4_t test_vcompress_vm_i64m4 (vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i64m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
@@ -200,8 +200,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) {
- return vcompress_vm_i64m8(mask, dest, src, vl);
+vint64m8_t test_vcompress_vm_i64m8 (vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i64m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
@@ -209,8 +209,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) {
- return vcompress_vm_u8mf8(mask, dest, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8 (vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u8mf8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
@@ -218,8 +218,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) {
- return vcompress_vm_u8mf4(mask, dest, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4 (vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u8mf4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
@@ -227,8 +227,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) {
- return vcompress_vm_u8mf2(mask, dest, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2 (vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u8mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
@@ -236,8 +236,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) {
- return vcompress_vm_u8m1(mask, dest, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1 (vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u8m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
@@ -245,8 +245,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) {
- return vcompress_vm_u8m2(mask, dest, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2 (vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_u8m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
@@ -254,8 +254,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) {
- return vcompress_vm_u8m4(mask, dest, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4 (vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_u8m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
@@ -263,8 +263,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) {
- return vcompress_vm_u8m8(mask, dest, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8 (vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) {
+ return vcompress_vm_u8m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
@@ -272,8 +272,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) {
- return vcompress_vm_u16mf4(mask, dest, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4 (vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u16mf4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
@@ -281,8 +281,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) {
- return vcompress_vm_u16mf2(mask, dest, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2 (vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u16mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
@@ -290,8 +290,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) {
- return vcompress_vm_u16m1(mask, dest, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1 (vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u16m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
@@ -299,8 +299,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) {
- return vcompress_vm_u16m2(mask, dest, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2 (vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u16m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
@@ -308,8 +308,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) {
- return vcompress_vm_u16m4(mask, dest, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4 (vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_u16m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
@@ -317,8 +317,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) {
- return vcompress_vm_u16m8(mask, dest, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8 (vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_u16m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
@@ -326,8 +326,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) {
- return vcompress_vm_u32mf2(mask, dest, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2 (vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u32mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
@@ -335,8 +335,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) {
- return vcompress_vm_u32m1(mask, dest, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1 (vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u32m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
@@ -344,8 +344,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) {
- return vcompress_vm_u32m2(mask, dest, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2 (vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u32m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
@@ -353,8 +353,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) {
- return vcompress_vm_u32m4(mask, dest, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4 (vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u32m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
@@ -362,8 +362,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) {
- return vcompress_vm_u32m8(mask, dest, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8 (vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_u32m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
@@ -371,8 +371,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) {
- return vcompress_vm_u64m1(mask, dest, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1 (vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u64m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
@@ -380,8 +380,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) {
- return vcompress_vm_u64m2(mask, dest, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2 (vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u64m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
@@ -389,8 +389,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) {
- return vcompress_vm_u64m4(mask, dest, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4 (vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u64m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
@@ -398,8 +398,62 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) {
- return vcompress_vm_u64m8(mask, dest, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8 (vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u64m8(dest, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vcompress_vm_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_f16mf4(dest, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vcompress_vm_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_f16mf2(dest, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vcompress_vm_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_f16m1(dest, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vcompress_vm_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_f16m2(dest, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vcompress_vm_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_f16m4(dest, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vcompress_vm_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_f16m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
@@ -407,8 +461,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) {
- return vcompress_vm_f32mf2(mask, dest, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2 (vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_f32mf2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
@@ -416,8 +470,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) {
- return vcompress_vm_f32m1(mask, dest, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1 (vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_f32m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
@@ -425,8 +479,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) {
- return vcompress_vm_f32m2(mask, dest, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2 (vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_f32m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
@@ -434,8 +488,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) {
- return vcompress_vm_f32m4(mask, dest, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4 (vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_f32m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
@@ -443,8 +497,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) {
- return vcompress_vm_f32m8(mask, dest, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8 (vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_f32m8(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
@@ -452,8 +506,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) {
- return vcompress_vm_f64m1(mask, dest, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1 (vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_f64m1(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
@@ -461,8 +515,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) {
- return vcompress_vm_f64m2(mask, dest, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2 (vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_f64m2(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
@@ -470,8 +524,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) {
- return vcompress_vm_f64m4(mask, dest, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4 (vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_f64m4(dest, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
@@ -479,62 +533,125 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) {
- return vcompress_vm_f64m8(mask, dest, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8 (vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_f64m8(dest, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4(
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vcompress_vm_f16mf4 (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) {
- return vcompress_vm_f16mf4(mask, dest, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i8mf8_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vcompress_vm_f16mf2 (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) {
- return vcompress_vm_f16mf2(mask, dest, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i8mf4_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vcompress_vm_f16m1 (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) {
- return vcompress_vm_f16m1(mask, dest, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i8mf2_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vcompress_vm_f16m2 (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) {
- return vcompress_vm_f16m2(mask, dest, src, vl);
+vint8m1_t test_vcompress_vm_i8m1_tu (vint8m1_t merge, vint8m1_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i8m1_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vcompress_vm_f16m4 (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) {
- return vcompress_vm_f16m4(mask, dest, src, vl);
+vint8m2_t test_vcompress_vm_i8m2_tu (vint8m2_t merge, vint8m2_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_i8m2_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vcompress_vm_i8m4_tu (vint8m4_t merge, vint8m4_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_i8m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vcompress_vm_i8m8_tu (vint8m8_t merge, vint8m8_t src, vbool1_t mask, size_t vl) {
+ return vcompress_vm_i8m8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vcompress_vm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i16mf4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vcompress_vm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i16mf2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vcompress_vm_i16m1_tu (vint16m1_t merge, vint16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i16m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vcompress_vm_i16m2_tu (vint16m2_t merge, vint16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i16m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) {
- return vcompress_vm_f16m8(mask, dest, src, vl);
+vint16m4_t test_vcompress_vm_i16m4_tu (vint16m4_t merge, vint16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_i16m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vcompress_vm_i16m8_tu (vint16m8_t merge, vint16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_i16m8_tu(merge, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
@@ -542,8 +659,197 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
- return vcompress_vm_i32mf2_tu(mask, merge, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i32mf2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vcompress_vm_i32m1_tu (vint32m1_t merge, vint32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i32m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vcompress_vm_i32m2_tu (vint32m2_t merge, vint32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i32m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vcompress_vm_i32m4_tu (vint32m4_t merge, vint32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i32m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vcompress_vm_i32m8_tu (vint32m8_t merge, vint32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_i32m8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vcompress_vm_i64m1_tu (vint64m1_t merge, vint64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_i64m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vcompress_vm_i64m2_tu (vint64m2_t merge, vint64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_i64m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vcompress_vm_i64m4_tu (vint64m4_t merge, vint64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_i64m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vcompress_vm_i64m8_tu (vint64m8_t merge, vint64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_i64m8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vcompress_vm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u8mf8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vcompress_vm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u8mf4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vcompress_vm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u8mf2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vcompress_vm_u8m1_tu (vuint8m1_t merge, vuint8m1_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u8m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vcompress_vm_u8m2_tu (vuint8m2_t merge, vuint8m2_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_u8m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vcompress_vm_u8m4_tu (vuint8m4_t merge, vuint8m4_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_u8m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vcompress_vm_u8m8_tu (vuint8m8_t merge, vuint8m8_t src, vbool1_t mask, size_t vl) {
+ return vcompress_vm_u8m8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vcompress_vm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u16mf4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vcompress_vm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u16mf2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vcompress_vm_u16m1_tu (vuint16m1_t merge, vuint16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u16m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vcompress_vm_u16m2_tu (vuint16m2_t merge, vuint16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u16m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vcompress_vm_u16m4_tu (vuint16m4_t merge, vuint16m4_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_u16m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vcompress_vm_u16m8_tu (vuint16m8_t merge, vuint16m8_t src, vbool2_t mask, size_t vl) {
+ return vcompress_vm_u16m8_tu(merge, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
@@ -551,42 +857,744 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
- return vcompress_vm_u32mf2_tu(mask, merge, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u32mf2_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) {
- return vcompress_vm_f32mf2_tu(mask, merge, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1_tu (vuint32m1_t merge, vuint32m1_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u32m1_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
- return vcompress_vm_i32mf2_ta(mask, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2_tu (vuint32m2_t merge, vuint32m2_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u32m2_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
- return vcompress_vm_u32mf2_ta(mask, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4_tu (vuint32m4_t merge, vuint32m4_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u32m4_tu(merge, src, mask, vl);
}
-// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vcompress_vm_u32m8_tu (vuint32m8_t merge, vuint32m8_t src, vbool4_t mask, size_t vl) {
+ return vcompress_vm_u32m8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vcompress_vm_u64m1_tu (vuint64m1_t merge, vuint64m1_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_u64m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vcompress_vm_u64m2_tu (vuint64m2_t merge, vuint64m2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_u64m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vcompress_vm_u64m4_tu (vuint64m4_t merge, vuint64m4_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_u64m4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vcompress_vm_u64m8_tu (vuint64m8_t merge, vuint64m8_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_u64m8_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vcompress_vm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+ return vcompress_vm_f16mf4_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vcompress_vm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+ return vcompress_vm_f16mf2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vcompress_vm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t src, vbool16_t mask, size_t vl) {
+ return vcompress_vm_f16m1_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vcompress_vm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t src, vbool8_t mask, size_t vl) {
+ return vcompress_vm_f16m2_tu(merge, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu(
@test_vcompress_vm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vcompress_vm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vcompress_vm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vcompress_vm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8_tu(merge, src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vcompress_vm_i8mf8_ta (vint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i8mf8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vcompress_vm_i8mf4_ta (vint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i8mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vcompress_vm_i8mf2_ta (vint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i8mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vcompress_vm_i8m1_ta (vint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i8m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vcompress_vm_i8m2_ta (vint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i8m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vcompress_vm_i8m4_ta (vint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i8m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m8_t test_vcompress_vm_i8m8_ta (vint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_i8m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vcompress_vm_i16mf4_ta (vint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i16mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vcompress_vm_i16mf2_ta (vint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i16mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vcompress_vm_i16m1_ta (vint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i16m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vcompress_vm_i16m2_ta (vint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i16m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vcompress_vm_i16m4_ta (vint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i16m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vcompress_vm_i16m8_ta (vint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_i16m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vcompress_vm_i32mf2_ta (vint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i32mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vcompress_vm_i32m1_ta (vint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i32m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vcompress_vm_i32m2_ta (vint32m2_t src, vbool16_t mask, size_t vl) { + return 
vcompress_vm_i32m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vcompress_vm_i32m4_ta (vint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i32m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vcompress_vm_i32m8_ta (vint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_i32m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vcompress_vm_i64m1_ta (vint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_i64m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vcompress_vm_i64m2_ta (vint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_i64m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vcompress_vm_i64m4_ta (vint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_i64m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vcompress_vm_i64m8_ta (vint64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_i64m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcompress_vm_u8mf8_ta (vuint8mf8_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u8mf8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcompress_vm_u8mf4_ta (vuint8mf4_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u8mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcompress_vm_u8mf2_ta (vuint8mf2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u8mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcompress_vm_u8m1_ta (vuint8m1_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u8m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcompress_vm_u8m2_ta (vuint8m2_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u8m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcompress_vm_u8m4_ta (vuint8m4_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u8m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcompress_vm_u8m8_ta (vuint8m8_t src, vbool1_t mask, size_t vl) { + return vcompress_vm_u8m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcompress_vm_u16mf4_ta (vuint16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u16mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcompress_vm_u16mf2_ta (vuint16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u16mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcompress_vm_u16m1_ta (vuint16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u16m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcompress_vm_u16m2_ta (vuint16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u16m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcompress_vm_u16m4_ta (vuint16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u16m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcompress_vm_u16m8_ta (vuint16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_u16m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcompress_vm_u32mf2_ta (vuint32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u32mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcompress_vm_u32m1_ta (vuint32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u32m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcompress_vm_u32m2_ta (vuint32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u32m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcompress_vm_u32m4_ta (vuint32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_u32m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcompress_vm_u32m8_ta (vuint32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_u32m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcompress_vm_u64m1_ta (vuint64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_u64m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcompress_vm_u64m2_ta (vuint64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_u64m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcompress_vm_u64m4_ta (vuint64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_u64m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcompress_vm_u64m8_ta (vuint64m8_t 
src, vbool8_t mask, size_t vl) { + return vcompress_vm_u64m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4_ta (vfloat16mf4_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f16mf4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2_ta (vfloat16mf2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f16mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1_ta (vfloat16m1_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f16m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2_ta (vfloat16m2_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f16m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4_ta (vfloat16m4_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f16m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8_ta (vfloat16m8_t src, vbool2_t mask, size_t vl) { + return vcompress_vm_f16m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vcompress_vm_f32mf2_ta (vfloat32mf2_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f32mf2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vcompress_vm_f32m1_ta (vfloat32m1_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f32m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vcompress_vm_f32m2_ta (vfloat32m2_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f32m2_ta(src, mask, 
vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vcompress_vm_f32m4_ta (vfloat32m4_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f32m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vcompress_vm_f32m8_ta (vfloat32m8_t src, vbool4_t mask, size_t vl) { + return vcompress_vm_f32m8_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vcompress_vm_f64m1_ta (vfloat64m1_t src, vbool64_t mask, size_t vl) { + return vcompress_vm_f64m1_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vcompress_vm_f64m2_ta (vfloat64m2_t src, vbool32_t mask, size_t vl) { + return vcompress_vm_f64m2_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vcompress_vm_f64m4_ta (vfloat64m4_t src, vbool16_t mask, size_t vl) { + return vcompress_vm_f64m4_ta(src, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vcompress_vm_f32mf2_ta(mask, src, vl); +vfloat64m8_t test_vcompress_vm_f64m8_ta (vfloat64m8_t src, vbool8_t mask, size_t vl) { + return vcompress_vm_f64m8_ta(src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c @@ -6,14 +6,67 @@ #include +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f16mf4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f16mf2(op1, op2, mask, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmerge_vfm_f16m1 (vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f16m1(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmerge_vfm_f16m2 (vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f16m2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmerge_vfm_f16m4 (vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f16m4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmerge_vfm_f16m8 (vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_vfm_f16m8(op1, op2, mask, vl); +} + // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, - float op2, size_t vl) { - return vfmerge_vfm_f32mf2(mask, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2 (vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( @@ -21,9 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, - size_t vl) { - return vfmerge_vfm_f32m1(mask, op1, op2, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1 (vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( @@ -31,9 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, - size_t vl) { - return vfmerge_vfm_f32m2(mask, op1, op2, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2 (vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( @@ -41,9 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t 
test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, - size_t vl) { - return vfmerge_vfm_f32m4(mask, op1, op2, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4 (vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( @@ -51,9 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, - size_t vl) { - return vfmerge_vfm_f32m8(mask, op1, op2, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8 (vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( @@ -61,9 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, - double op2, size_t vl) { - return vfmerge_vfm_f64m1(mask, op1, op2, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1 (vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( @@ -71,9 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, - double op2, size_t vl) { - return vfmerge_vfm_f64m2(mask, op1, op2, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2 (vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( @@ -81,9 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, - double op2, size_t vl) { - return vfmerge_vfm_f64m4(mask, op1, op2, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4 (vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( @@ -91,79 +137,276 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, - size_t vl) { - return vfmerge_vfm_f64m8(mask, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8 (vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f64m8(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmerge_vfm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmerge_vfm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmerge_vfm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( [[MERGE:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmerge_vfm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_vfm_f16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f32mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmerge_vfm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f32m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmerge_vfm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return 
vfmerge_vfm_f32m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmerge_vfm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f32m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmerge_vfm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f32m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmerge_vfm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmerge_vfm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmerge_vfm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[MERGE:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmerge_vfm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f64m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf4(mask, op1, op2, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta (vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f16mf4_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( 
poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16mf2(mask, op1, op2, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta (vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f16mf2_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m1(mask, op1, op2, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1_ta (vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f16m1_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m2(mask, op1, op2, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2_ta (vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f16m2_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m4(mask, op1, op2, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4_ta (vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f16m4_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmerge_vfm_f16m8(mask, op1, op2, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8_ta (vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { + return vfmerge_vfm_f16m8_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32mf2_tu(mask, merge, op1, op2, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta (vfloat32mf2_t op1, float 
op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f32mf2_ta(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmerge_vfm_f32m1_ta (vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f32m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmerge_vfm_f32m2_ta (vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f32m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmerge_vfm_f32m4_ta (vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmerge_vfm_f32m8_ta (vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { + return vfmerge_vfm_f32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmerge_vfm_f64m1_ta (vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { + return vfmerge_vfm_f64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmerge_vfm_f64m2_ta (vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { + return vfmerge_vfm_f64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmerge_vfm_f64m4_ta (vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { + return vfmerge_vfm_f64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t 
test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmerge_vfm_f32mf2_ta(mask, op1, op2, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8_ta (vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { + return vfmerge_vfm_f64m8_ta(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c @@ -11,9 +11,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, - size_t vl) { - return vmerge_vvm_i8mf8(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( @@ -21,9 +20,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8mf8(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( @@ -31,9 +29,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, - size_t vl) { - return vmerge_vvm_i8mf4(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( @@ -41,9 +38,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8mf4(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( @@ -51,9 +47,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, - size_t vl) { - return vmerge_vvm_i8mf2(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( @@ -61,9 +56,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8mf2(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t 
mask, size_t vl) { + return vmerge_vxm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( @@ -71,9 +65,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, - size_t vl) { - return vmerge_vvm_i8m1(mask, op1, op2, vl); +vint8m1_t test_vmerge_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( @@ -81,9 +74,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m1(mask, op1, op2, vl); +vint8m1_t test_vmerge_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( @@ -91,9 +83,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, - size_t vl) { - return vmerge_vvm_i8m2(mask, op1, op2, vl); +vint8m2_t test_vmerge_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( @@ -101,9 +92,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m2(mask, op1, op2, vl); +vint8m2_t test_vmerge_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( @@ -111,9 +101,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, - size_t vl) { - return vmerge_vvm_i8m4(mask, op1, op2, vl); +vint8m4_t test_vmerge_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( @@ -121,9 +110,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m4(mask, op1, op2, vl); +vint8m4_t test_vmerge_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( @@ -131,9 +119,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, - size_t vl) { - return vmerge_vvm_i8m8(mask, op1, op2, vl); +vint8m8_t 
test_vmerge_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( @@ -141,9 +128,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, - size_t vl) { - return vmerge_vxm_i8m8(mask, op1, op2, vl); +vint8m8_t test_vmerge_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( @@ -151,9 +137,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, - vint16mf4_t op2, size_t vl) { - return vmerge_vvm_i16mf4(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( @@ -161,9 +146,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16mf4(mask, op1, op2, vl); +vint16mf4_t test_vmerge_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( @@ -171,9 +155,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, - vint16mf2_t op2, size_t vl) { - return vmerge_vvm_i16mf2(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( @@ -181,9 +164,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16mf2(mask, op1, op2, vl); +vint16mf2_t test_vmerge_vxm_i16mf2 (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( @@ -191,9 +173,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, - size_t vl) { - return vmerge_vvm_i16m1(mask, op1, op2, vl); +vint16m1_t test_vmerge_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( @@ -201,9 +182,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m1(mask, op1, op2, vl); +vint16m1_t test_vmerge_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( @@ -211,9 +191,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, - size_t vl) { - return vmerge_vvm_i16m2(mask, op1, op2, vl); +vint16m2_t test_vmerge_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( @@ -221,9 +200,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m2(mask, op1, op2, vl); +vint16m2_t test_vmerge_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( @@ -231,9 +209,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, - size_t vl) { - return vmerge_vvm_i16m4(mask, op1, op2, vl); +vint16m4_t test_vmerge_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( @@ -241,9 +218,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m4(mask, op1, op2, vl); +vint16m4_t test_vmerge_vxm_i16m4 (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( @@ -251,9 +227,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, - size_t vl) { - return vmerge_vvm_i16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( @@ -261,9 +236,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, - size_t vl) { - return vmerge_vxm_i16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( @@ -271,9 
+245,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, - vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( @@ -281,9 +254,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32mf2(mask, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( @@ -291,9 +263,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, - size_t vl) { - return vmerge_vvm_i32m1(mask, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( @@ -301,9 +272,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m1(mask, op1, op2, vl); +vint32m1_t test_vmerge_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( @@ -311,9 +281,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, - size_t vl) { - return vmerge_vvm_i32m2(mask, op1, op2, vl); +vint32m2_t test_vmerge_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( @@ -321,9 +290,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m2(mask, op1, op2, vl); +vint32m2_t test_vmerge_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( @@ -331,9 +299,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, - size_t vl) { - return vmerge_vvm_i32m4(mask, op1, op2, vl); +vint32m4_t 
test_vmerge_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( @@ -341,9 +308,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m4(mask, op1, op2, vl); +vint32m4_t test_vmerge_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( @@ -351,9 +317,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, - size_t vl) { - return vmerge_vvm_i32m8(mask, op1, op2, vl); +vint32m8_t test_vmerge_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( @@ -361,9 +326,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, - size_t vl) { - return vmerge_vxm_i32m8(mask, op1, op2, vl); +vint32m8_t test_vmerge_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( @@ -371,9 +335,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, - size_t vl) { - return vmerge_vvm_i64m1(mask, op1, op2, vl); +vint64m1_t test_vmerge_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( @@ -381,9 +344,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m1(mask, op1, op2, vl); +vint64m1_t test_vmerge_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( @@ -391,9 +353,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, - size_t vl) { - return vmerge_vvm_i64m2(mask, op1, op2, vl); +vint64m2_t test_vmerge_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( @@ -401,9 +362,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m2(mask, op1, op2, vl); +vint64m2_t test_vmerge_vxm_i64m2 (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( @@ -411,9 +371,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, - size_t vl) { - return vmerge_vvm_i64m4(mask, op1, op2, vl); +vint64m4_t test_vmerge_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( @@ -421,9 +380,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m4(mask, op1, op2, vl); +vint64m4_t test_vmerge_vxm_i64m4 (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( @@ -431,9 +389,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, - size_t vl) { - return vmerge_vvm_i64m8(mask, op1, op2, vl); +vint64m8_t test_vmerge_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( @@ -441,9 +398,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, - size_t vl) { - return vmerge_vxm_i64m8(mask, op1, op2, vl); +vint64m8_t test_vmerge_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( @@ -451,9 +407,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, - vuint8mf8_t op2, size_t vl) { - return vmerge_vvm_u8mf8(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( @@ -461,9 +416,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8mf8(mask, op1, op2, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( @@ -471,9 +425,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, - vuint8mf4_t op2, size_t vl) { - return vmerge_vvm_u8mf4(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( @@ -481,9 +434,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8mf4(mask, op1, op2, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( @@ -491,9 +443,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, - vuint8mf2_t op2, size_t vl) { - return vmerge_vvm_u8mf2(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( @@ -501,9 +452,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8mf2(mask, op1, op2, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( @@ -511,9 +461,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, - size_t vl) { - return vmerge_vvm_u8m1(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( @@ -521,9 +470,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8m1(mask, op1, op2, vl); +vuint8m1_t test_vmerge_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( @@ -531,9 +479,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, - size_t vl) { - return vmerge_vvm_u8m2(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t 
op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( @@ -541,9 +488,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8m2(mask, op1, op2, vl); +vuint8m2_t test_vmerge_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( @@ -551,9 +497,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, - size_t vl) { - return vmerge_vvm_u8m4(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( @@ -561,9 +506,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8m4(mask, op1, op2, vl); +vuint8m4_t test_vmerge_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( @@ -571,9 +515,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, - size_t vl) { - return vmerge_vvm_u8m8(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( @@ -581,9 +524,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, - size_t vl) { - return vmerge_vxm_u8m8(mask, op1, op2, vl); +vuint8m8_t test_vmerge_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( @@ -591,9 +533,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, - vuint16mf4_t op2, size_t vl) { - return vmerge_vvm_u16mf4(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( @@ -601,9 +542,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t 
op1, - uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf4(mask, op1, op2, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( @@ -611,9 +551,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - vuint16mf2_t op2, size_t vl) { - return vmerge_vvm_u16mf2(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( @@ -621,9 +560,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, - uint16_t op2, size_t vl) { - return vmerge_vxm_u16mf2(mask, op1, op2, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( @@ -631,9 +569,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, - vuint16m1_t op2, size_t vl) { - return vmerge_vvm_u16m1(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( @@ -641,9 +578,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m1(mask, op1, op2, vl); +vuint16m1_t test_vmerge_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( @@ -651,9 +587,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, - vuint16m2_t op2, size_t vl) { - return vmerge_vvm_u16m2(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( @@ -661,9 +596,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m2(mask, op1, op2, vl); +vuint16m2_t test_vmerge_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( @@ -671,9 +605,8 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, - vuint16m4_t op2, size_t vl) { - return vmerge_vvm_u16m4(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( @@ -681,9 +614,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m4(mask, op1, op2, vl); +vuint16m4_t test_vmerge_vxm_u16m4 (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( @@ -691,9 +623,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, - vuint16m8_t op2, size_t vl) { - return vmerge_vvm_u16m8(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( @@ -701,9 +632,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, - size_t vl) { - return vmerge_vxm_u16m8(mask, op1, op2, vl); +vuint16m8_t test_vmerge_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( @@ -711,9 +641,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( @@ -721,9 +650,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, - uint32_t op2, size_t vl) { - return vmerge_vxm_u32mf2(mask, op1, op2, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( @@ -731,9 +659,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, - vuint32m1_t op2, size_t vl) { - return vmerge_vvm_u32m1(mask, op1, op2, vl); 
+vuint32m1_t test_vmerge_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( @@ -741,9 +668,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m1(mask, op1, op2, vl); +vuint32m1_t test_vmerge_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( @@ -751,9 +677,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, - vuint32m2_t op2, size_t vl) { - return vmerge_vvm_u32m2(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( @@ -761,9 +686,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m2(mask, op1, op2, vl); +vuint32m2_t test_vmerge_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( @@ -771,9 +695,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, - vuint32m4_t op2, size_t vl) { - return vmerge_vvm_u32m4(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( @@ -781,9 +704,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m4(mask, op1, op2, vl); +vuint32m4_t test_vmerge_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( @@ -791,9 +713,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, - vuint32m8_t op2, size_t vl) { - return vmerge_vvm_u32m8(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( @@ -801,9 +722,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, - size_t vl) { - return vmerge_vxm_u32m8(mask, op1, op2, vl); +vuint32m8_t test_vmerge_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( @@ -811,9 +731,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, - vuint64m1_t op2, size_t vl) { - return vmerge_vvm_u64m1(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( @@ -821,9 +740,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m1(mask, op1, op2, vl); +vuint64m1_t test_vmerge_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( @@ -831,9 +749,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, - vuint64m2_t op2, size_t vl) { - return vmerge_vvm_u64m2(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( @@ -841,9 +758,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m2(mask, op1, op2, vl); +vuint64m2_t test_vmerge_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( @@ -851,9 +767,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, - vuint64m4_t op2, size_t vl) { - return vmerge_vvm_u64m4(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( @@ -861,9 +776,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m4(mask, op1, op2, vl); +vuint64m4_t test_vmerge_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { + return 
vmerge_vxm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( @@ -871,9 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, - vuint64m8_t op2, size_t vl) { - return vmerge_vvm_u64m8(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( @@ -881,9 +794,62 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, - size_t vl) { - return vmerge_vxm_u64m8(mask, op1, op2, vl); +vuint64m8_t test_vmerge_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return 
vmerge_vvm_f16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( @@ -891,9 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, - vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2(mask, op1, op2, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( @@ -901,9 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, - vfloat32m1_t op2, size_t vl) { - return vmerge_vvm_f32m1(mask, op1, op2, vl); +vfloat32m1_t test_vmerge_vvm_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( @@ -911,9 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, - vfloat32m2_t op2, size_t vl) { - return vmerge_vvm_f32m2(mask, op1, op2, vl); +vfloat32m2_t test_vmerge_vvm_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( @@ -921,9 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, - vfloat32m4_t op2, size_t vl) { - return vmerge_vvm_f32m4(mask, op1, op2, vl); +vfloat32m4_t test_vmerge_vvm_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( @@ -931,9 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, - vfloat32m8_t op2, size_t vl) { - return vmerge_vvm_f32m8(mask, op1, op2, vl); +vfloat32m8_t test_vmerge_vvm_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( @@ -941,9 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, - vfloat64m1_t op2, size_t vl) { - return vmerge_vvm_f64m1(mask, op1, op2, vl); +vfloat64m1_t test_vmerge_vvm_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( @@ -951,9 +911,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, - vfloat64m2_t op2, size_t vl) { - return vmerge_vvm_f64m2(mask, op1, op2, vl); +vfloat64m2_t test_vmerge_vvm_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( @@ -961,9 +920,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, - vfloat64m4_t op2, size_t vl) { - return vmerge_vvm_f64m4(mask, op1, op2, vl); +vfloat64m4_t test_vmerge_vvm_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( @@ -971,63 +929,242 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, - vfloat64m8_t op2, size_t vl) { - return vmerge_vvm_f64m8(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8(op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmerge_vvm_f16mf4(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i8mf8_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmerge_vvm_f16mf2(mask, op1, op2, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_tu (vint8mf8_t merge, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i8mf8_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t 
test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmerge_vvm_f16m1(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i8mf4_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmerge_vvm_f16m2(mask, op1, op2, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_tu (vint8mf4_t merge, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i8mf4_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmerge_vvm_f16m4(mask, op1, op2, vl); +vint8mf2_t test_vmerge_vvm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i8mf2_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmerge_vxm_i8mf2_tu (vint8mf2_t merge, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i8mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vvm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i8m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmerge_vxm_i8m1_tu (vint8m1_t merge, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i8m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vvm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i8m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmerge_vxm_i8m2_tu (vint8m2_t merge, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i8m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vvm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i8m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmerge_vxm_i8m4_tu (vint8m4_t merge, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i8m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vvm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vvm_i8m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmerge_vxm_i8m8_tu (vint8m8_t merge, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { + return vmerge_vxm_i8m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vvm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmerge_vxm_i16mf4_tu (vint16mf4_t merge, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vvm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmerge_vxm_i16mf2_tu (vint16mf2_t merge, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_i16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vvm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_i16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmerge_vxm_i16m1_tu (vint16m1_t merge, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { + return vmerge_vxm_i16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vvm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_i16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmerge_vxm_i16m2_tu (vint16m2_t merge, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_i16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vvm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_i16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmerge_vxm_i16m4_tu (vint16m4_t merge, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_i16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmerge_vvm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_i16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmerge_vvm_f16m8(mask, op1, op2, vl); +vint16m8_t test_vmerge_vxm_i16m8_tu (vint16m8_t merge, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { + return vmerge_vxm_i16m8_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -1035,8 +1172,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmerge_vvm_i32mf2_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vvm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_i32mf2_tu(merge, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -1044,78 +1181,1608 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmerge_vxm_i32mf2_tu(mask, merge, op1, op2, vl); +vint32mf2_t test_vmerge_vxm_i32mf2_tu (vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_i32mf2_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmerge_vvm_u32mf2_tu(mask, merge, op1, op2, vl); +vint32m1_t test_vmerge_vvm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_i32m1_tu(merge, op1, op2, mask, vl); } -// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
- return vmerge_vxm_u32mf2_tu(mask, merge, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1_tu (vint32m1_t merge, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_i32m1_tu(merge, op1, op2, mask, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_i32m2_tu(merge, op1, op2, mask, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmerge_vxm_i32mf2_ta(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2_tu (vint32m2_t merge, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_i32m2_tu(merge, op1, op2, mask, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmerge_vvm_u32mf2_ta(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_i32m4_tu(merge, op1, op2, mask, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmerge_vxm_u32mf2_ta(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4_tu (vint32m4_t merge, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_i32m4_tu(merge, op1, op2, mask, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmerge_vvm_f32mf2_tu(mask, merge, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_i32m8_tu(merge, op1, op2, mask, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmerge_vxm_i32m8_tu (vint32m8_t merge, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_i32m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmerge_vvm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_i64m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmerge_vxm_i64m1_tu (vint64m1_t merge, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_i64m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmerge_vvm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_i64m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmerge_vxm_i64m2_tu (vint64m2_t merge, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_i64m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmerge_vvm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_i64m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmerge_vxm_i64m4_tu (vint64m4_t merge, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_i64m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmerge_vvm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_i64m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmerge_vxm_i64m8_tu (vint64m8_t merge, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_i64m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vvm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u8mf8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vxm_u8mf8_tu (vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u8mf8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vvm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u8mf4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vxm_u8mf4_tu (vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u8mf4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vvm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u8mf2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vxm_u8mf2_tu (vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u8mf2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vvm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u8m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vxm_u8m1_tu (vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_u8m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vvm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_u8m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vxm_u8m2_tu (vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_u8m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vvm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_u8m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vxm_u8m4_tu (vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_u8m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vvm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vvm_u8m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[MERGE:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vxm_u8m8_tu (vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vxm_u8m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vvm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u16mf4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vxm_u16mf4_tu (vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u16mf4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vvm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u16mf2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vxm_u16mf2_tu (vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u16mf2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vvm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u16m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vxm_u16m1_tu (vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u16m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vvm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u16m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vxm_u16m2_tu (vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_u16m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vvm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_u16m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vxm_u16m4_tu (vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_u16m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vvm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_u16m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[MERGE:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vxm_u16m8_tu (vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_u16m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u32mf2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u32mf2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vvm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u32m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vxm_u32m1_tu (vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u32m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vvm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u32m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vxm_u32m2_tu (vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u32m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vvm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u32m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vxm_u32m4_tu (vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_u32m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vmerge_vvm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_u32m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vmerge_vxm_u32m8_tu (vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_u32m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vmerge_vvm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u64m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vmerge_vxm_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u64m1_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmerge_vvm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u64m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmerge_vxm_u64m2_tu (vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u64m2_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmerge_vvm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u64m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmerge_vxm_u64m4_tu (vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u64m4_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmerge_vvm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u64m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmerge_vxm_u64m8_tu (vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_u64m8_tu(merge, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmerge_vvm_i8mf8_ta (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_i8mf8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmerge_vxm_i8mf8_ta (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_i8mf8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmerge_vvm_i8mf4_ta (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_i8mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmerge_vxm_i8mf4_ta (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_i8mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmerge_vvm_i8mf2_ta (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_i8mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmerge_vxm_i8mf2_ta (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_i8mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmerge_vvm_i8m1_ta (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_i8m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmerge_vxm_i8m1_ta (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_i8m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmerge_vvm_i8m2_ta (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_i8m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmerge_vxm_i8m2_ta (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_i8m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmerge_vvm_i8m4_ta (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_i8m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmerge_vxm_i8m4_ta (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_i8m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vmerge_vvm_i8m8_ta (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vvm_i8m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vmerge_vxm_i8m8_ta (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vxm_i8m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vmerge_vvm_i16mf4_ta (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_i16mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vmerge_vxm_i16mf4_ta (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_i16mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vmerge_vvm_i16mf2_ta (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_i16mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vmerge_vxm_i16mf2_ta (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_i16mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmerge_vvm_i16m1_ta (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_i16m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmerge_vxm_i16m1_ta (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_i16m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmerge_vvm_i16m2_ta (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_i16m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmerge_vxm_i16m2_ta (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_i16m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmerge_vvm_i16m4_ta (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_i16m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmerge_vxm_i16m4_ta (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_i16m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmerge_vvm_i16m8_ta (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_i16m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmerge_vxm_i16m8_ta (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_i16m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_ta (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_i32mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_ta (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_i32mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmerge_vvm_i32m1_ta (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_i32m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmerge_vxm_i32m1_ta (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_i32m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmerge_vvm_i32m2_ta (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_i32m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmerge_vxm_i32m2_ta (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_i32m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmerge_vvm_i32m4_ta (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_i32m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmerge_vxm_i32m4_ta (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_i32m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmerge_vvm_i32m8_ta (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_i32m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmerge_vxm_i32m8_ta (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_i32m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmerge_vvm_i64m1_ta (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_i64m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmerge_vxm_i64m1_ta (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_i64m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmerge_vvm_i64m2_ta (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_i64m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmerge_vxm_i64m2_ta (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_i64m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmerge_vvm_i64m4_ta (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_i64m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmerge_vxm_i64m4_ta (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_i64m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmerge_vvm_i64m8_ta (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_i64m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmerge_vxm_i64m8_ta (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_i64m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vvm_u8mf8_ta (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u8mf8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vxm_u8mf8_ta (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u8mf8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vvm_u8mf4_ta (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u8mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vxm_u8mf4_ta (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u8mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vvm_u8mf2_ta (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u8mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vxm_u8mf2_ta (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u8mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vvm_u8m1_ta (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u8m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vmerge_vxm_u8m1_ta (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_u8m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vvm_u8m2_ta (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_u8m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vmerge_vxm_u8m2_ta (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_u8m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vvm_u8m4_ta (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_u8m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vmerge_vxm_u8m4_ta (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_u8m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vvm_u8m8_ta (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vvm_u8m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vmerge_vxm_u8m8_ta (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+ return vmerge_vxm_u8m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vvm_u16mf4_ta (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u16mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vxm_u16mf4_ta (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u16mf4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vvm_u16mf2_ta (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u16mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vxm_u16mf2_ta (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u16mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vvm_u16m1_ta (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u16m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vmerge_vxm_u16m1_ta (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u16m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vvm_u16m2_ta (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u16m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vmerge_vxm_u16m2_ta (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vxm_u16m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vvm_u16m4_ta (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vvm_u16m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vmerge_vxm_u16m4_ta (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+ return vmerge_vxm_u16m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vvm_u16m8_ta (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vvm_u16m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vmerge_vxm_u16m8_ta (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+ return vmerge_vxm_u16m8_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vvm_u32mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+ return vmerge_vxm_u32mf2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vvm_u32m1_ta (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vvm_u32m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vmerge_vxm_u32m1_ta (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+ return vmerge_vxm_u32m1_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vvm_u32m2_ta (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vvm_u32m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vmerge_vxm_u32m2_ta (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+ return vmerge_vxm_u32m2_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vmerge_vvm_u32m4_ta (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+ return vmerge_vvm_u32m4_ta(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
@llvm.riscv.vmerge.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmerge_vxm_u32m4_ta (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vvm_u32m8_ta (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_u32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmerge_vxm_u32m8_ta (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { + return vmerge_vxm_u32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vvm_u64m1_ta (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_u64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmerge_vxm_u64m1_ta (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { + return vmerge_vxm_u64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vvm_u64m2_ta (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_u64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmerge_vxm_u64m2_ta (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { + return vmerge_vxm_u64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vvm_u64m4_ta (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_u64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmerge_vxm_u64m4_ta (vuint64m4_t op1, uint64_t op2, vbool16_t mask, 
size_t vl) { + return vmerge_vxm_u64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vvm_u64m8_ta (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_u64m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmerge_vxm_u64m8_ta (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { + return vmerge_vxm_u64m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_tu (vfloat16mf4_t merge, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_tu (vfloat16mf2_t merge, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1_tu (vfloat16m1_t merge, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2_tu (vfloat16m2_t merge, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4_tu (vfloat16m4_t merge, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8_tu (vfloat16m8_t merge, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t 
mask, size_t vl) { + return vmerge_vvm_f16m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmerge_vvm_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f32mf2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmerge_vvm_f32m1_tu (vfloat32m1_t merge, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmerge_vvm_f32m2_tu (vfloat32m2_t merge, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmerge_vvm_f32m4_tu (vfloat32m4_t merge, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmerge_vvm_f32m8_tu (vfloat32m8_t merge, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmerge_vvm_f64m1_tu (vfloat64m1_t merge, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmerge_vvm_f64m2_tu (vfloat64m2_t merge, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t 
test_vmerge_vvm_f64m4_tu (vfloat64m4_t merge, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmerge_vvm_f64m8_tu (vfloat64m8_t merge, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8_tu(merge, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmerge_vvm_f16mf4_ta (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f16mf4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmerge_vvm_f16mf2_ta (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f16mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmerge_vvm_f16m1_ta (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f16m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmerge_vvm_f16m2_ta (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f16m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmerge_vvm_f16m4_ta (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f16m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmerge_vvm_f16m8_ta (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { + return vmerge_vvm_f16m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmerge_vvm_f32mf2_ta (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { + return 
vmerge_vvm_f32mf2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmerge_vvm_f32m1_ta (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f32m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmerge_vvm_f32m2_ta (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f32m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmerge_vvm_f32m4_ta (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f32m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmerge_vvm_f32m8_ta (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { + return vmerge_vvm_f32m8_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmerge_vvm_f64m1_ta (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { + return vmerge_vvm_f64m1_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmerge_vvm_f64m2_ta (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { + return vmerge_vvm_f64m2_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmerge_vvm_f64m4_ta (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { + return vmerge_vvm_f64m4_ta(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmerge_vvm_f32mf2_ta(mask, op1, op2, vl); +vfloat64m8_t test_vmerge_vvm_f64m8_ta (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { + return vmerge_vvm_f64m8_ta(op1, op2, mask, 
vl); }
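
For orientation, here is a minimal usage sketch of the calling convention these tests lock in: with the HasPassthruOperand scheme, _tu variants take the merge (passthru) vector as the first argument, and the mask moves to the position just before vl. This is illustrative only; it assumes a toolchain whose <riscv_vector.h> implements this patch, and blend() is a hypothetical helper, not part of the test suite.

#include <riscv_vector.h>

// Hypothetical helper showing the reordered vmerge operands.
vfloat32m1_t blend(vfloat32m1_t merge, vfloat32m1_t a, vfloat32m1_t b,
                   vbool32_t mask, size_t vl) {
  // Tail-undisturbed: elements past vl are taken from 'merge'.
  vfloat32m1_t tu = vmerge_vvm_f32m1_tu(merge, a, b, mask, vl);
  // Tail-agnostic: the passthru is poison, so only the first vl lanes are defined.
  vfloat32m1_t ta = vmerge_vvm_f32m1_ta(a, b, mask, vl);
  return vfadd_vv_f32m1(tu, ta, vl);
}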